Compare commits: v2.3.0...feat/ui/no
1087 commits
.dockerignore

@@ -3,21 +3,23 @@
 !invokeai
 !ldm
 !pyproject.toml
-!README.md
+
+# ignore frontend/web but whitelist dist
+invokeai/frontend/web/
+!invokeai/frontend/web/dist/
+
+# ignore invokeai/assets but whitelist invokeai/assets/web
+invokeai/assets/
+!invokeai/assets/web/
 
 # Guard against pulling in any models that might exist in the directory tree
 **/*.pt*
 **/*.ckpt
 
-# ignore frontend but whitelist dist
-invokeai/frontend/**
-!invokeai/frontend/dist
-
-# ignore invokeai/assets but whitelist invokeai/assets/web
-invokeai/assets
-!invokeai/assets/web
-
-# ignore python cache
-**/__pycache__
+# Byte-compiled / optimized / DLL files
+**/__pycache__/
 **/*.py[cod]
-**/*.egg-info
+
+# Distribution / packaging
+**/*.egg-info/
+**/*.egg
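The rewritten ignore file leans on ignore-then-whitelist ordering: later `!` patterns re-include paths that earlier patterns excluded, so rule order matters. A quick way to verify what actually survives into the build context is to copy the context into a throwaway image and list it (a sketch; the temp file path and tag name are arbitrary):

```sh
# Build a scratch image whose only job is to copy the (filtered) build
# context, then list it; anything matched by .dockerignore will be missing.
printf 'FROM busybox\nCOPY . /ctx\nCMD ["find", "/ctx"]\n' > /tmp/ctx-check.Dockerfile
docker build -f /tmp/ctx-check.Dockerfile -t ctx-check .
docker run --rm ctx-check
```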
1 .git-blame-ignore-revs Normal file

@@ -0,0 +1 @@
+b3dccfaeb636599c02effc377cdd8a87d658256c
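`.git-blame-ignore-revs` lists commits that `git blame` should skip, typically bulk-reformatting commits like the one added here. Git does not pick the file up automatically; each clone opts in (these flags exist in Git 2.23 and later):

```sh
# One-time, per clone: make every git blame skip the listed commits
git config blame.ignoreRevsFile .git-blame-ignore-revs

# Or skip them for a single invocation
git blame --ignore-revs-file .git-blame-ignore-revs README.md
```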
60 .github/CODEOWNERS vendored

@@ -1,50 +1,34 @@
 # continuous integration
-/.github/workflows/ @mauwii
+/.github/workflows/ @mauwii @lstein @blessedcoolant
 
 # documentation
-/docs/ @lstein @mauwii @tildebyte
-mkdocs.yml @lstein @mauwii
+/docs/ @lstein @mauwii @tildebyte @blessedcoolant
+/mkdocs.yml @lstein @mauwii @blessedcoolant
+
+# nodes
+/invokeai/app/ @Kyle0654 @blessedcoolant
 
 # installation and configuration
-/pyproject.toml @mauwii @lstein @ebr
-/docker/ @mauwii
+/pyproject.toml @mauwii @lstein @blessedcoolant
+/docker/ @mauwii @lstein @blessedcoolant
 /scripts/ @ebr @lstein
-/installer/ @ebr @lstein @tildebyte
-ldm/invoke/config @lstein @ebr
-invokeai/assets @lstein @ebr
-invokeai/configs @lstein @ebr
-/ldm/invoke/_version.py @lstein @blessedcoolant
+/installer/ @lstein @ebr
+/invokeai/assets @lstein @ebr
+/invokeai/configs @lstein
+/invokeai/version @lstein @blessedcoolant
 
 # web ui
-/invokeai/frontend @blessedcoolant @psychedelicious
-/invokeai/backend @blessedcoolant @psychedelicious
+/invokeai/frontend @blessedcoolant @psychedelicious @lstein
+/invokeai/backend @blessedcoolant @psychedelicious @lstein
 
-# generation and model management
-/ldm/*.py @lstein
-/ldm/generate.py @lstein @keturn
-/ldm/invoke/args.py @lstein @blessedcoolant
-/ldm/invoke/ckpt* @lstein
-/ldm/invoke/ckpt_generator @lstein
-/ldm/invoke/CLI.py @lstein
-/ldm/invoke/config @lstein @ebr @mauwii
-/ldm/invoke/generator @keturn @damian0815
-/ldm/invoke/globals.py @lstein @blessedcoolant
-/ldm/invoke/merge_diffusers.py @lstein
-/ldm/invoke/model_manager.py @lstein @blessedcoolant
-/ldm/invoke/txt2mask.py @lstein
-/ldm/invoke/patchmatch.py @Kyle0654
-/ldm/invoke/restoration @lstein @blessedcoolant
-
-# attention, textual inversion, model configuration
-/ldm/models @damian0815 @keturn
-/ldm/modules @damian0815 @keturn
-
-# Nodes
-apps/ @Kyle0654
-
-# legacy REST API
-# is CapableWeb still engaged?
-/ldm/invoke/pngwriter.py @CapableWeb
-/ldm/invoke/server_legacy.py @CapableWeb
-/scripts/legacy_api.py @CapableWeb
-/tests/legacy_tests.sh @CapableWeb
+# generation, model management, postprocessing
+/invokeai/backend @keturn @damian0815 @lstein @blessedcoolant @jpphoto
+
+# front ends
+/invokeai/frontend/CLI @lstein
+/invokeai/frontend/install @lstein @ebr @mauwii
+/invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
+/invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
+/invokeai/frontend/web @psychedelicious @blessedcoolant
10 .github/ISSUE_TEMPLATE/BUG_REPORT.yml vendored

@@ -65,6 +65,16 @@ body:
       placeholder: 8GB
     validations:
       required: false
 
+  - type: input
+    id: version-number
+    attributes:
+      label: What version did you experience this issue on?
+      description: |
+        Please share the version of Invoke AI that you experienced the issue on. If this is not the latest version, please update first to confirm the issue still exists. If you are testing main, please include the commit hash instead.
+      placeholder: X.X.X
+    validations:
+      required: true
+
   - type: textarea
     id: what-happened
19 .github/stale.yaml vendored Normal file

@@ -0,0 +1,19 @@
+# Number of days of inactivity before an issue becomes stale
+daysUntilStale: 28
+# Number of days of inactivity before a stale issue is closed
+daysUntilClose: 14
+# Issues with these labels will never be considered stale
+exemptLabels:
+  - pinned
+  - security
+# Label to use when marking an issue as stale
+staleLabel: stale
+# Comment to post when marking an issue as stale. Set to `false` to disable
+markComment: >
+  This issue has been automatically marked as stale because it has not had
+  recent activity. It will be closed if no further activity occurs. Please
+  update the ticket if this is still a problem on the latest release.
+# Comment to post when closing a stale issue. Set to `false` to disable
+closeComment: >
+  Due to inactivity, this issue has been automatically closed. If this is
+  still a problem on the latest release, please recreate the issue.
78 .github/workflows/build-container.yml vendored

@@ -3,9 +3,22 @@ on:
   push:
     branches:
       - 'main'
-      - 'update/ci/*'
+      - 'update/ci/docker/*'
+      - 'update/docker/*'
+      - 'dev/ci/docker/*'
+      - 'dev/docker/*'
+    paths:
+      - 'pyproject.toml'
+      - '.dockerignore'
+      - 'invokeai/**'
+      - 'docker/Dockerfile'
     tags:
       - 'v*.*.*'
+  workflow_dispatch:
+
+permissions:
+  contents: write
+  packages: write
 
 jobs:
   docker:
@@ -14,24 +27,21 @@ jobs:
       fail-fast: false
       matrix:
         flavor:
-          - amd
+          - rocm
           - cuda
           - cpu
         include:
-          - flavor: amd
+          - flavor: rocm
            pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
-            dockerfile: docker/Dockerfile
-            platforms: linux/amd64,linux/arm64
           - flavor: cuda
            pip-extra-index-url: ''
-            dockerfile: docker/Dockerfile
-            platforms: linux/amd64,linux/arm64
           - flavor: cpu
            pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
-            dockerfile: docker/Dockerfile
-            platforms: linux/amd64,linux/arm64
     runs-on: ubuntu-latest
     name: ${{ matrix.flavor }}
+    env:
+      PLATFORMS: 'linux/amd64,linux/arm64'
+      DOCKERFILE: 'docker/Dockerfile'
     steps:
       - name: Checkout
         uses: actions/checkout@v3
@@ -41,24 +51,27 @@ jobs:
         uses: docker/metadata-action@v4
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
-          images: ghcr.io/${{ github.repository }}
+          images: |
+            ghcr.io/${{ github.repository }}
+            ${{ vars.DOCKERHUB_REPOSITORY }}
           tags: |
             type=ref,event=branch
             type=ref,event=tag
-            type=semver,pattern={{version}}
-            type=semver,pattern={{major}}.{{minor}}
-            type=semver,pattern={{major}}
+            type=pep440,pattern={{version}}
+            type=pep440,pattern={{major}}.{{minor}}
+            type=pep440,pattern={{major}}
             type=sha,enable=true,prefix=sha-,format=short
           flavor: |
             latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
             suffix=-${{ matrix.flavor }},onlatest=false
 
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v2
 
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v2
         with:
-          platforms: ${{ matrix.platforms }}
+          platforms: ${{ env.PLATFORMS }}
 
       - name: Login to GitHub Container Registry
         if: github.event_name != 'pull_request'
@@ -68,25 +81,34 @@ jobs:
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
 
+      - name: Login to Docker Hub
+        if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
       - name: Build container
+        id: docker_build
         uses: docker/build-push-action@v4
         with:
           context: .
-          file: ${{ matrix.dockerfile }}
-          platforms: ${{ matrix.platforms }}
-          push: ${{ github.event_name != 'pull_request' }}
+          file: ${{ env.DOCKERFILE }}
+          platforms: ${{ env.PLATFORMS }}
+          push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' }}
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
           build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          cache-from: |
+            type=gha,scope=${{ github.ref_name }}-${{ matrix.flavor }}
+            type=gha,scope=main-${{ matrix.flavor }}
+          cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.flavor }}
 
-      - name: Output image, digest and metadata to summary
-        run: |
-          {
-            echo imageid: "${{ steps.docker_build.outputs.imageid }}"
-            echo digest: "${{ steps.docker_build.outputs.digest }}"
-            echo labels: "${{ steps.meta.outputs.labels }}"
-            echo tags: "${{ steps.meta.outputs.tags }}"
-            echo version: "${{ steps.meta.outputs.version }}"
-          } >> "$GITHUB_STEP_SUMMARY"
+      - name: Docker Hub Description
+        if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
+        uses: peter-evans/dockerhub-description@v3
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+          repository: ${{ vars.DOCKERHUB_REPOSITORY }}
+          short-description: ${{ github.event.repository.description }}
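For a sense of what the metadata settings above produce: each flavor gets its own tag suffix, and the `pep440` patterns emit version tags without the leading `v`. A rough sketch of the GHCR tags a hypothetical `v2.3.1` tag build of the `cuda` flavor would push (the Docker Hub names depend on whatever the `DOCKERHUB_REPOSITORY` variable is set to, and the exact tag set depends on metadata-action's pep440 handling):

```sh
# Hypothetical pulls after a v2.3.1 tag build of the cuda flavor
docker pull ghcr.io/invoke-ai/invokeai:v2.3.1-cuda  # type=ref,event=tag
docker pull ghcr.io/invoke-ai/invokeai:2.3.1-cuda   # type=pep440,pattern={{version}}
docker pull ghcr.io/invoke-ai/invokeai:2.3-cuda     # type=pep440,pattern={{major}}.{{minor}}
docker pull ghcr.io/invoke-ai/invokeai:2-cuda       # type=pep440,pattern={{major}}
```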
27 .github/workflows/close-inactive-issues.yml vendored Normal file

@@ -0,0 +1,27 @@
+name: Close inactive issues
+on:
+  schedule:
+    - cron: "00 6 * * *"
+
+env:
+  DAYS_BEFORE_ISSUE_STALE: 14
+  DAYS_BEFORE_ISSUE_CLOSE: 28
+
+jobs:
+  close-issues:
+    runs-on: ubuntu-latest
+    permissions:
+      issues: write
+      pull-requests: write
+    steps:
+      - uses: actions/stale@v5
+        with:
+          days-before-issue-stale: ${{ env.DAYS_BEFORE_ISSUE_STALE }}
+          days-before-issue-close: ${{ env.DAYS_BEFORE_ISSUE_CLOSE }}
+          stale-issue-label: "Inactive Issue"
+          stale-issue-message: "There has been no activity in this issue for ${{ env.DAYS_BEFORE_ISSUE_STALE }} days. If this issue is still being experienced, please reply with an updated confirmation that the issue is still being experienced with the latest release."
+          close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please recreate the issue."
+          days-before-pr-stale: -1
+          days-before-pr-close: -1
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          operations-per-run: 500
22 .github/workflows/lint-frontend.yml vendored

@@ -3,14 +3,22 @@ name: Lint frontend
 on:
   pull_request:
     paths:
-      - 'invokeai/frontend/**'
+      - 'invokeai/frontend/web/**'
+    types:
+      - 'ready_for_review'
+      - 'opened'
+      - 'synchronize'
   push:
+    branches:
+      - 'main'
     paths:
-      - 'invokeai/frontend/**'
+      - 'invokeai/frontend/web/**'
+  merge_group:
+  workflow_dispatch:
 
 defaults:
   run:
-    working-directory: invokeai/frontend
+    working-directory: invokeai/frontend/web
 
 jobs:
   lint-frontend:
@@ -23,7 +31,7 @@ jobs:
           node-version: '18'
       - uses: actions/checkout@v3
       - run: 'yarn install --frozen-lockfile'
-      - run: 'yarn tsc'
-      - run: 'yarn run madge'
-      - run: 'yarn run lint --max-warnings=0'
-      - run: 'yarn run prettier --check'
+      - run: 'yarn run lint:tsc'
+      - run: 'yarn run lint:madge'
+      - run: 'yarn run lint:eslint'
+      - run: 'yarn run lint:prettier'
3 .github/workflows/mkdocs-material.yml vendored

@@ -5,6 +5,9 @@ on:
       - 'main'
       - 'development'
 
+permissions:
+  contents: write
+
 jobs:
   mkdocs-material:
     if: github.event.pull_request.draft == false
4 .github/workflows/pypi-release.yml vendored

@@ -3,7 +3,7 @@ name: PyPI Release
 on:
   push:
     paths:
-      - 'ldm/invoke/_version.py'
+      - 'invokeai/version/invokeai_version.py'
   workflow_dispatch:
 
 jobs:
@@ -28,7 +28,7 @@ jobs:
         run: twine check dist/*
 
       - name: check PyPI versions
-        if: github.ref == 'refs/heads/main'
+        if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/v2.3'
         run: |
           pip install --upgrade requests
           python -c "\
66 .github/workflows/test-invoke-pip-skip.yml vendored Normal file

@@ -0,0 +1,66 @@
+name: Test invoke.py pip
+on:
+  pull_request:
+    paths:
+      - '**'
+      - '!pyproject.toml'
+      - '!invokeai/**'
+      - 'invokeai/frontend/web/**'
+  merge_group:
+  workflow_dispatch:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  matrix:
+    if: github.event.pull_request.draft == false
+    strategy:
+      matrix:
+        python-version:
+          # - '3.9'
+          - '3.10'
+        pytorch:
+          # - linux-cuda-11_6
+          - linux-cuda-11_7
+          - linux-rocm-5_2
+          - linux-cpu
+          - macos-default
+          - windows-cpu
+          # - windows-cuda-11_6
+          # - windows-cuda-11_7
+        include:
+          # - pytorch: linux-cuda-11_6
+          #   os: ubuntu-22.04
+          #   extra-index-url: 'https://download.pytorch.org/whl/cu116'
+          #   github-env: $GITHUB_ENV
+          - pytorch: linux-cuda-11_7
+            os: ubuntu-22.04
+            github-env: $GITHUB_ENV
+          - pytorch: linux-rocm-5_2
+            os: ubuntu-22.04
+            extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
+            github-env: $GITHUB_ENV
+          - pytorch: linux-cpu
+            os: ubuntu-22.04
+            extra-index-url: 'https://download.pytorch.org/whl/cpu'
+            github-env: $GITHUB_ENV
+          - pytorch: macos-default
+            os: macOS-12
+            github-env: $GITHUB_ENV
+          - pytorch: windows-cpu
+            os: windows-2022
+            github-env: $env:GITHUB_ENV
+          # - pytorch: windows-cuda-11_6
+          #   os: windows-2022
+          #   extra-index-url: 'https://download.pytorch.org/whl/cu116'
+          #   github-env: $env:GITHUB_ENV
+          # - pytorch: windows-cuda-11_7
+          #   os: windows-2022
+          #   extra-index-url: 'https://download.pytorch.org/whl/cu117'
+          #   github-env: $env:GITHUB_ENV
+    name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
+    runs-on: ${{ matrix.os }}
+    steps:
+      - run: 'echo "No build required"'
11 .github/workflows/test-invoke-pip.yml vendored

@@ -3,11 +3,20 @@ on:
   push:
     branches:
       - 'main'
+    paths:
+      - 'pyproject.toml'
+      - 'invokeai/**'
+      - '!invokeai/frontend/web/**'
   pull_request:
+    paths:
+      - 'pyproject.toml'
+      - 'invokeai/**'
+      - '!invokeai/frontend/web/**'
     types:
       - 'ready_for_review'
       - 'opened'
       - 'synchronize'
+  merge_group:
   workflow_dispatch:
 
 concurrency:
@@ -99,7 +108,7 @@ jobs:
       - name: set INVOKEAI_OUTDIR
         run: >
           python -c
-          "import os;from ldm.invoke.globals import Globals;OUTDIR=os.path.join(Globals.root,str('outputs'));print(f'INVOKEAI_OUTDIR={OUTDIR}')"
+          "import os;from invokeai.backend.globals import Globals;OUTDIR=os.path.join(Globals.root,str('outputs'));print(f'INVOKEAI_OUTDIR={OUTDIR}')"
          >> ${{ matrix.github-env }}
 
       - name: run invokeai-configure
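The `set INVOKEAI_OUTDIR` step above only computes `<InvokeAI root>/outputs` and appends an environment assignment to the runner's env file; the import swap from `ldm.invoke.globals` to `invokeai.backend.globals` is the whole change. In plain shell the effect is roughly this (the `/root/invokeai` value stands in for a hypothetical resolved `Globals.root`):

```sh
# What the python one-liner ends up appending to the matrix's env file
echo "INVOKEAI_OUTDIR=/root/invokeai/outputs" >> "$GITHUB_ENV"
```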
14 .gitignore vendored

@@ -1,4 +1,5 @@
 # ignore default image save location and model symbolic link
+.idea/
 embeddings/
 outputs/
 models/ldm/stable-diffusion-v1/model.ckpt
@@ -62,15 +63,18 @@ pip-delete-this-directory.txt
 htmlcov/
 .tox/
 .nox/
+.coveragerc
 .coverage
 .coverage.*
 .cache
 nosetests.xml
 coverage.xml
+cov.xml
 *.cover
 *.py,cover
 .hypothesis/
 .pytest_cache/
+.pytest.ini
 cover/
 junit/
 
@@ -196,7 +200,7 @@ checkpoints
 .DS_Store
 
 # Let the frontend manage its own gitignore
-!invokeai/frontend/*
+!invokeai/frontend/web/*
 
 # Scratch folder
 .scratch/
@@ -211,11 +215,6 @@ gfpgan/
 # config file (will be created by installer)
 configs/models.yaml
 
-# weights (will be created by installer)
-models/ldm/stable-diffusion-v1/*.ckpt
-models/clipseg
-models/gfpgan
-
 # ignore initfile
 .invokeai
 
@@ -230,6 +229,3 @@ installer/install.bat
 installer/install.sh
 installer/update.bat
 installer/update.sh
-
-# no longer stored in source directory
-models
158 README.md

@@ -1,6 +1,6 @@
 <div align="center">
 
 ![project logo]
 
 # InvokeAI: A Stable Diffusion Toolkit
 
@@ -10,10 +10,10 @@
 
 [![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link]
 
-[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link]
+[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link]
 
 [CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
-[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
+[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain
 [discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
 [discord link]: https://discord.gg/ZmtBAhwWhy
 [github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
@@ -28,12 +28,14 @@
 [latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main
 [latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
 [latest release link]: https://github.com/invoke-ai/InvokeAI/releases
+[translation status badge]: https://hosted.weblate.org/widgets/invokeai/-/svg-badge.svg
+[translation status link]: https://hosted.weblate.org/engage/invokeai/
 
 </div>
 
 InvokeAI is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. InvokeAI offers an industry leading Web Interface, interactive Command Line Interface, and also serves as the foundation for multiple commercial products.
 
-**Quick links**: [[How to Install](#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
+**Quick links**: [[How to Install](https://invoke-ai.github.io/InvokeAI/#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
 
 _Note: InvokeAI is rapidly evolving. Please use the
 [Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
@@ -41,38 +43,136 @@ requests. Be sure to use the provided templates. They will help us diagnose issues.
 
 <div align="center">
 
 ![canvas preview]
 
 </div>
 
-# Getting Started with InvokeAI
+## Table of Contents
+
+1. [Quick Start](#getting-started-with-invokeai)
+2. [Installation](#detailed-installation-instructions)
+3. [Hardware Requirements](#hardware-requirements)
+4. [Features](#features)
+5. [Latest Changes](#latest-changes)
+6. [Troubleshooting](#troubleshooting)
+7. [Contributing](#contributing)
+8. [Contributors](#contributors)
+9. [Support](#support)
+10. [Further Reading](#further-reading)
+
+## Getting Started with InvokeAI
 
 For full installation and upgrade instructions, please see:
 [InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)
 
+### Automatic Installer (suggested for 1st time users)
+
 1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)
+
 2. Download the .zip file for your OS (Windows/macOS/Linux).
+
 3. Unzip the file.
-4. If you are on Windows, double-click on the `install.bat` script. On macOS, open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press return. On Linux, run `install.sh`.
-5. Wait a while, until it is done.
-6. The folder where you ran the installer from will now be filled with lots of files. If you are on Windows, double-click on the `invoke.bat` file. On macOS, open a Terminal window, drag `invoke.sh` from the folder into the Terminal, and press return. On Linux, run `invoke.sh`
-7. Press 2 to open the "browser-based UI", press enter/return, wait a minute or two for Stable Diffusion to start up, then open your browser and go to http://localhost:9090.
-8. Type `banana sushi` in the box on the top left and click `Invoke`
 
-## Table of Contents
+4. If you are on Windows, double-click on the `install.bat` script. On
+macOS, open a Terminal window, drag the file `install.sh` from Finder
+into the Terminal, and press return. On Linux, run `install.sh`.
 
-1. [Installation](#installation)
-2. [Hardware Requirements](#hardware-requirements)
-3. [Features](#features)
-4. [Latest Changes](#latest-changes)
-5. [Troubleshooting](#troubleshooting)
-6. [Contributing](#contributing)
-7. [Contributors](#contributors)
-8. [Support](#support)
-9. [Further Reading](#further-reading)
+5. You'll be asked to confirm the location of the folder in which
+to install InvokeAI and its image generation model files. Pick a
+location with at least 15 GB of free memory. More if you plan on
+installing lots of models.
 
-## Installation
+6. Wait while the installer does its thing. After installing the software,
+the installer will launch a script that lets you configure InvokeAI and
+select a set of starting image generation models.
+
+7. Find the folder that InvokeAI was installed into (it is not the
+same as the unpacked zip file directory!) The default location of this
+folder (if you didn't change it in step 5) is `~/invokeai` on
+Linux/Mac systems, and `C:\Users\YourName\invokeai` on Windows. This directory will contain launcher scripts named `invoke.sh` and `invoke.bat`.
+
+8. On Windows systems, double-click on the `invoke.bat` file. On
+macOS, open a Terminal window, drag `invoke.sh` from the folder into
+the Terminal, and press return. On Linux, run `invoke.sh`
+
+9. Press 2 to open the "browser-based UI", press enter/return, wait a
+minute or two for Stable Diffusion to start up, then open your browser
+and go to http://localhost:9090.
+
+10. Type `banana sushi` in the box on the top left and click `Invoke`
+
+### Command-Line Installation (for users familiar with Terminals)
+
+You must have Python 3.9 or 3.10 installed on your machine. Earlier or later versions are
+not supported.
+
+1. Open a command-line window on your machine. The PowerShell is recommended for Windows.
+2. Create a directory to install InvokeAI into. You'll need at least 15 GB of free space:
+
+   ```terminal
+   mkdir invokeai
+   ```
+
+3. Create a virtual environment named `.venv` inside this directory and activate it:
+
+   ```terminal
+   cd invokeai
+   python -m venv .venv --prompt InvokeAI
+   ```
+
+4. Activate the virtual environment (do it every time you run InvokeAI)
+
+   _For Linux/Mac users:_
+
+   ```sh
+   source .venv/bin/activate
+   ```
+
+   _For Windows users:_
+
+   ```ps
+   .venv\Scripts\activate
+   ```
+
+5. Install the InvokeAI module and its dependencies. Choose the command suited for your platform & GPU.
+
+   _For Windows/Linux with an NVIDIA GPU:_
+
+   ```terminal
+   pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
+   ```
+
+   _For Linux with an AMD GPU:_
+
+   ```sh
+   pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
+   ```
+
+   _For Macintoshes, either Intel or M1/M2:_
+
+   ```sh
+   pip install InvokeAI --use-pep517
+   ```
+
+6. Configure InvokeAI and install a starting set of image generation models (you only need to do this once):
+
+   ```terminal
+   invokeai-configure
+   ```
+
+7. Launch the web server (do it every time you run InvokeAI):
+
+   ```terminal
+   invokeai --web
+   ```
+
+8. Point your browser to http://localhost:9090 to bring up the web interface.
+9. Type `banana sushi` in the box on the top left and click `Invoke`.
+
+Be sure to activate the virtual environment each time before re-launching InvokeAI,
+using `source .venv/bin/activate` or `.venv\Scripts\activate`.
+
+### Detailed Installation Instructions
+
 This fork is supported across Linux, Windows and Macintosh. Linux
 users can use either an Nvidia-based card (with CUDA support) or an
@@ -80,13 +180,13 @@ AMD card (using the ROCm driver). For full installation and upgrade
 instructions, please see:
 [InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)
 
-### Hardware Requirements
+## Hardware Requirements
 
 InvokeAI is supported across Linux, Windows and macOS. Linux
 users can use either an Nvidia-based card (with CUDA support) or an
 AMD card (using the ROCm driver).
 
-#### System
+### System
 
 You will need one of the following:
 
@@ -98,11 +198,11 @@ We do not recommend the GTX 1650 or 1660 series video cards. They are
 unable to run in half-precision mode and do not have sufficient VRAM
 to render 512x512 images.
 
-#### Memory
+### Memory
 
 - At least 12 GB Main Memory RAM.
 
-#### Disk
+### Disk
 
 - At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.
 
@@ -152,13 +252,15 @@ Notes](https://github.com/invoke-ai/InvokeAI/releases) and the
 Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
 problems and other issues.
 
-# Contributing
+## Contributing
 
 Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
 cleanup, testing, or code reviews, is very much encouraged to do so.
 
 To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.
 
+If you'd like to help with translation, please see our [translation guide](docs/other/TRANSLATION.md).
+
 If you are unfamiliar with how
 to contribute to GitHub projects, here is a
 [Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**.
@@ -175,6 +277,8 @@ This fork is a combined effort of various people from across the world.
 [Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for
 their time, hard work and effort.
 
+Thanks to [Weblate](https://weblate.org/) for generously providing translation services to this project.
+
 ### Support
 
 For support, please use this repository's GitHub Issues tracking service, or join the Discord.
@@ -147,7 +147,7 @@ echo ***** Installed invoke launcher script ******
 rd /s /q binary_installer installer_files
 
 @rem preload the models
-call .venv\Scripts\python scripts\configure_invokeai.py
+call .venv\Scripts\python ldm\invoke\config\invokeai_configure.py
 set err_msg=----- model download clone failed -----
 if %errorlevel% neq 0 goto err_exit
 deactivate
coverage/.gitignore (new file, vendored, 4 lines)
@@ -0,0 +1,4 @@
# Ignore everything in this directory
*
# Except this file
!.gitignore
@@ -1,71 +1,80 @@
 # syntax=docker/dockerfile:1

 ARG PYTHON_VERSION=3.9
 ##################
 ## base image ##
 ##################
-FROM python:${PYTHON_VERSION}-slim AS python-base
+FROM --platform=${TARGETPLATFORM} python:${PYTHON_VERSION}-slim AS python-base

-# prepare for buildkit cache
-RUN rm -f /etc/apt/apt.conf.d/docker-clean
+LABEL org.opencontainers.image.authors="mauwii@outlook.de"

-# Install necesarry packages
+# Prepare apt for buildkit cache
+RUN rm -f /etc/apt/apt.conf.d/docker-clean \
+    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache

+# Install dependencies
 RUN \
     --mount=type=cache,target=/var/cache/apt,sharing=locked \
+    --mount=type=cache,target=/var/lib/apt,sharing=locked \
     apt-get update \
-    && apt-get install \
-    -yqq \
+    && apt-get install -y \
     --no-install-recommends \
     libgl1-mesa-glx=20.3.* \
     libglib2.0-0=2.66.* \
-    libopencv-dev=4.5.* \
-    && rm -rf /var/lib/apt/lists/*
+    libopencv-dev=4.5.*

-# set working directory and path
+# Set working directory and env
 ARG APPDIR=/usr/src
 ARG APPNAME=InvokeAI
 WORKDIR ${APPDIR}
-ENV PATH=${APPDIR}/${APPNAME}/bin:$PATH
+ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH
+# Keeps Python from generating .pyc files in the container
+ENV PYTHONDONTWRITEBYTECODE 1
+# Turns off buffering for easier container logging
+ENV PYTHONUNBUFFERED 1
+# Don't fall back to legacy build system
+ENV PIP_USE_PEP517=1

 #######################
 ## build pyproject ##
 #######################
 FROM python-base AS pyproject-builder
-ENV PIP_USE_PEP517=1

-# prepare for buildkit cache
+# Install build dependencies
+RUN \
+    --mount=type=cache,target=/var/cache/apt,sharing=locked \
+    --mount=type=cache,target=/var/lib/apt,sharing=locked \
+    apt-get update \
+    && apt-get install -y \
+    --no-install-recommends \
+    build-essential=12.9 \
+    gcc=4:10.2.* \
+    python3-dev=3.9.*

+# Prepare pip for buildkit cache
 ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
 ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
 RUN mkdir -p ${PIP_CACHE_DIR}

-# Install dependencies
-RUN \
-    --mount=type=cache,target=${PIP_CACHE_DIR} \
-    --mount=type=cache,target=/var/cache/apt,sharing=locked \
-    apt-get update \
-    && apt-get install \
-    -yqq \
-    --no-install-recommends \
-    build-essential=12.9 \
-    gcc=4:10.2.* \
-    python3-dev=3.9.* \
-    && rm -rf /var/lib/apt/lists/*
-
-# create virtual environment
+# Create virtual environment
 RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
     python3 -m venv "${APPNAME}" \
     --upgrade-deps

-# copy sources
-COPY --link . .
+# Install requirements
+COPY --link pyproject.toml .
+COPY --link invokeai/version/invokeai_version.py invokeai/version/__init__.py invokeai/version/

-# install pyproject.toml
 ARG PIP_EXTRA_INDEX_URL
 ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
-ARG PIP_PACKAGE=.
 RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
-    "${APPDIR}/${APPNAME}/bin/pip" install ${PIP_PACKAGE}
+    "${APPNAME}"/bin/pip install .

-# build patchmatch
+# Install pyproject.toml
+COPY --link . .
+RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
+    "${APPNAME}/bin/pip" install .

+# Build patchmatch
 RUN python3 -c "from patchmatch import patch_match"

 #####################
@@ -73,14 +82,26 @@ RUN python3 -c "from patchmatch import patch_match"
 #####################
 FROM python-base AS runtime

-# setup environment
-COPY --from=pyproject-builder --link ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME}
-ENV INVOKEAI_ROOT=/data
-ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
+# Create a new user
+ARG UNAME=appuser
+RUN useradd \
+    --no-log-init \
+    -m \
+    -U \
+    "${UNAME}"

-# set Entrypoint and default CMD
+# Create volume directory
+ARG VOLUME_DIR=/data
+RUN mkdir -p "${VOLUME_DIR}" \
+    && chown -hR "${UNAME}:${UNAME}" "${VOLUME_DIR}"

+# Setup runtime environment
+USER ${UNAME}:${UNAME}
+COPY --chown=${UNAME}:${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
+ENV INVOKEAI_ROOT ${VOLUME_DIR}
+ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
+ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
+EXPOSE 9090
 ENTRYPOINT [ "invokeai" ]
-CMD [ "--web", "--host=0.0.0.0" ]
-VOLUME [ "/data" ]
+CMD [ "--web", "--host", "0.0.0.0", "--port", "9090" ]
+VOLUME [ "${VOLUME_DIR}" ]

-LABEL org.opencontainers.image.authors="mauwii@outlook.de"
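For anyone wanting to exercise the updated Dockerfile by hand rather than through the project's build.sh (shown next), here is a minimal sketch. The image tag, the build-context path, and the CPU wheel index are illustrative values, not project defaults:

```sh
# Sketch: build from the repo root, assuming the Dockerfile lives in docker/.
DOCKER_BUILDKIT=1 docker build \
    --build-arg PIP_EXTRA_INDEX_URL=https://download.pytorch.org/whl/cpu \
    --tag invokeai:dev \
    --file docker/Dockerfile .
```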
@@ -1,19 +1,24 @@
 #!/usr/bin/env bash
 set -e

-# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
-# Some possible pip extra-index urls (cuda 11.7 is available without extra url):
-# CUDA 11.6: https://download.pytorch.org/whl/cu116
-# ROCm 5.2: https://download.pytorch.org/whl/rocm5.2
-# CPU: https://download.pytorch.org/whl/cpu
-# as found on https://pytorch.org/get-started/locally/
+# If you want to build a specific flavor, set the CONTAINER_FLAVOR environment variable
+# e.g. CONTAINER_FLAVOR=cpu ./build.sh
+# Possible Values are:
+# - cpu
+# - cuda
+# - rocm
+# Don't forget to also set it when executing run.sh
+# if it is not set, the script will try to detect the flavor by itself.
+#
+# Doc can be found here:
+# https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/

-SCRIPTDIR=$(dirname "$0")
+SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
 cd "$SCRIPTDIR" || exit 1

 source ./env.sh

-DOCKERFILE=${INVOKE_DOCKERFILE:-Dockerfile}
+DOCKERFILE=${INVOKE_DOCKERFILE:-./Dockerfile}

 # print the settings
 echo -e "You are using these values:\n"
@@ -21,23 +26,25 @@ echo -e "Dockerfile:\t\t${DOCKERFILE}"
 echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
 echo -e "Volumename:\t\t${VOLUMENAME}"
 echo -e "Platform:\t\t${PLATFORM}"
-echo -e "Registry:\t\t${CONTAINER_REGISTRY}"
-echo -e "Repository:\t\t${CONTAINER_REPOSITORY}"
+echo -e "Container Registry:\t${CONTAINER_REGISTRY}"
+echo -e "Container Repository:\t${CONTAINER_REPOSITORY}"
 echo -e "Container Tag:\t\t${CONTAINER_TAG}"
+echo -e "Container Flavor:\t${CONTAINER_FLAVOR}"
 echo -e "Container Image:\t${CONTAINER_IMAGE}\n"

 # Create docker volume
 if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
     echo -e "Volume already exists\n"
 else
-    echo -n "createing docker volume "
+    echo -n "creating docker volume "
     docker volume create "${VOLUMENAME}"
 fi

 # Build Container
-DOCKER_BUILDKIT=1 docker build \
-    --platform="${PLATFORM}" \
-    --tag="${CONTAINER_IMAGE}" \
+docker build \
+    --platform="${PLATFORM:-linux/amd64}" \
+    --tag="${CONTAINER_IMAGE:-invokeai}" \
+    ${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \
     ${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
     ${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
     --file="${DOCKERFILE}" \
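Per the header comments above, a typical invocation pins the flavor explicitly:

```sh
# Build the CUDA flavor; omit CONTAINER_FLAVOR to let env.sh auto-detect it via torch.
CONTAINER_FLAVOR=cuda ./build.sh
```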
@@ -1,19 +1,31 @@
 #!/usr/bin/env bash

+# This file is used to set environment variables for the build.sh and run.sh scripts.
+
+# Try to detect the container flavor if no PIP_EXTRA_INDEX_URL got specified
 if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then

+    # Activate virtual environment if not already activated and exists
+    if [[ -z $VIRTUAL_ENV ]]; then
+        [[ -e "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" ]] \
+            && source "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" \
+            && echo "Activated virtual environment: $VIRTUAL_ENV"
+    fi
+
     # Decide which container flavor to build if not specified
     if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
         # Check for CUDA and ROCm
         CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
         ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
-        if [[ "$(uname -s)" != "Darwin" && "${CUDA_AVAILABLE}" == "True" ]]; then
+        if [[ "${CUDA_AVAILABLE}" == "True" ]]; then
             CONTAINER_FLAVOR="cuda"
-        elif [[ "$(uname -s)" != "Darwin" && "${ROCM_AVAILABLE}" == "True" ]]; then
+        elif [[ "${ROCM_AVAILABLE}" == "True" ]]; then
             CONTAINER_FLAVOR="rocm"
         else
             CONTAINER_FLAVOR="cpu"
         fi
     fi

     # Set PIP_EXTRA_INDEX_URL based on container flavor
     if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
         PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"
@@ -26,9 +38,10 @@ fi

 # Variables shared by build.sh and run.sh
 REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
-VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME,,}_data"}"
+REPOSITORY_NAME="${REPOSITORY_NAME,,}"
+VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME}_data"}"
 ARCH="${ARCH-$(uname -m)}"
-PLATFORM="${PLATFORM-Linux/${ARCH}}"
+PLATFORM="${PLATFORM-linux/${ARCH}}"
 INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
 CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
 CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
@@ -36,3 +49,6 @@ CONTAINER_FLAVOR="${CONTAINER_FLAVOR-cuda}"
 CONTAINER_TAG="${CONTAINER_TAG-"${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}"}"
 CONTAINER_IMAGE="${CONTAINER_REGISTRY}/${CONTAINER_REPOSITORY}:${CONTAINER_TAG}"
 CONTAINER_IMAGE="${CONTAINER_IMAGE,,}"
+
+# enable docker buildkit
+export DOCKER_BUILDKIT=1
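Because env.sh only sets variables, it can be sourced on its own to preview what build.sh and run.sh will use. A small sketch, assuming you run it from the directory containing the script:

```sh
source ./env.sh
echo "flavor: ${CONTAINER_FLAVOR}"
echo "image:  ${CONTAINER_IMAGE}"
echo "index:  ${PIP_EXTRA_INDEX_URL:-none (CUDA 11.7 needs no extra url)}"
```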
@@ -1,14 +1,16 @@
 #!/usr/bin/env bash
 set -e

-# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
-# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
+# How to use: https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/

-SCRIPTDIR=$(dirname "$0")
+SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
 cd "$SCRIPTDIR" || exit 1

 source ./env.sh

+# Create outputs directory if it does not exist
+[[ -d ./outputs ]] || mkdir ./outputs
+
 echo -e "You are using these values:\n"
 echo -e "Volumename:\t${VOLUMENAME}"
 echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"
@@ -19,13 +21,21 @@ docker run \
     --tty \
     --rm \
     --platform="${PLATFORM}" \
-    --name="${REPOSITORY_NAME,,}" \
-    --hostname="${REPOSITORY_NAME,,}" \
-    --mount=source="${VOLUMENAME}",target=/data \
-    ${MODELSPATH:+-u "$(id -u):$(id -g)"} \
+    --name="${REPOSITORY_NAME}" \
+    --hostname="${REPOSITORY_NAME}" \
+    --mount type=volume,volume-driver=local,source="${VOLUMENAME}",target=/data \
+    --mount type=bind,source="$(pwd)"/outputs/,target=/data/outputs/ \
     ${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
     ${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
     --publish=9090:9090 \
     --cap-add=sys_nice \
     ${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
-    "${CONTAINER_IMAGE}" ${1:+$@}
+    "${CONTAINER_IMAGE}" ${@:+$@}
+
+echo -e "\nCleaning trash folder ..."
+for f in outputs/.Trash*; do
+    if [ -e "$f" ]; then
+        rm -Rf "$f"
+        break
+    fi
+done
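A typical session, as a sketch. Both variables are optional and only forwarded to `docker run` when set (see the parameter expansions above); the token value is a placeholder:

```sh
# Run with all GPUs passed through and a HuggingFace token supplied.
GPU_FLAGS=all HUGGING_FACE_HUB_TOKEN=<your-token> ./run.sh
```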
Binary files:
- docs/assets/contributing/html-detail.png (new file, 470 KiB)
- docs/assets/contributing/html-overview.png (new file, 457 KiB)
- (modified image, filename not shown: 20 KiB before, 84 KiB after)
- docs/assets/installer-walkthrough/installing-models.png (new file, 128 KiB)
- docs/assets/installer-walkthrough/settings-form.png (new file, 114 KiB)
docs/contributing/ARCHITECTURE.md (new file, 93 lines)
@@ -0,0 +1,93 @@

# Invoke.AI Architecture

```mermaid
flowchart TB

    subgraph apps[Applications]
        webui[WebUI]
        cli[CLI]

        subgraph webapi[Web API]
            api[HTTP API]
            sio[Socket.IO]
        end

    end

    subgraph invoke[Invoke]
        direction LR
        invoker
        services
        sessions
        invocations
    end

    subgraph core[AI Core]
        Generate
    end

    webui --> webapi
    webapi --> invoke
    cli --> invoke

    invoker --> services & sessions
    invocations --> services
    sessions --> invocations

    services --> core

    %% Styles
    classDef sg fill:#5028C8,font-weight:bold,stroke-width:2,color:#fff,stroke:#14141A
    classDef default stroke-width:2px,stroke:#F6B314,color:#fff,fill:#14141A

    class apps,webapi,invoke,core sg
```

## Applications

Applications are built on top of the invoke framework. They should construct an `invoker` and then interact through it. They should avoid interacting directly with core code in order to support a variety of configurations.

### Web UI

The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.tiangolo.com/) and [Socket.IO](https://socket.io/). The frontend code is found in `/frontend` and the backend code is found in `/ldm/invoke/app/api_app.py` and `/ldm/invoke/app/api/`. The code is further organized as such:

| Component | Description |
| --- | --- |
| api_app.py | Sets up the API app, annotates the OpenAPI spec with additional data, and runs the API |
| dependencies | Creates all invoker services and the invoker, and provides them to the API |
| events | An eventing system that could in the future be adapted to support horizontal scale-out |
| sockets | The Socket.IO interface - handles listening to and emitting session events (events are defined in the events service module) |
| routers | API definitions for different areas of API functionality |

### CLI

The CLI is built automatically from invocation metadata, and also supports invocation piping and auto-linking. Code is available in `/ldm/invoke/app/cli_app.py`.

## Invoke

The Invoke framework provides the interface to the underlying AI systems and is built with flexibility and extensibility in mind. There are four major concepts: invoker, sessions, invocations, and services.

### Invoker

The invoker (`/ldm/invoke/app/services/invoker.py`) is the primary interface through which applications interact with the framework. Its primary purpose is to create, manage, and invoke sessions. It also maintains two sets of services:
- **invocation services**, which are used by invocations to interact with core functionality.
- **invoker services**, which are used by the invoker to manage sessions and manage the invocation queue.

### Sessions

Invocations and links between them form a graph, which is maintained in a session. Sessions can be queued for invocation, which will execute their graph (either the next ready invocation, or all invocations). Sessions also maintain execution history for the graph (including storage of any outputs). An invocation may be added to a session at any time, and there is capability to add an entire graph at once, as well as to automatically link new invocations to previous invocations. Invocations cannot be deleted or modified once added.

The session graph does not support looping. This is left as an application problem to prevent additional complexity in the graph.

### Invocations

Invocations represent individual units of execution, with inputs and outputs. All invocations are located in `/ldm/invoke/app/invocations`, and are all automatically discovered and made available in the applications. These are the primary way to expose new functionality in Invoke.AI, and the [implementation guide](INVOCATIONS.md) explains how to add new invocations.

### Services

Services provide invocations access to AI Core functionality and other necessary functionality (e.g. image storage). These are available in `/ldm/invoke/app/services`. As a general rule, new services should provide an interface as an abstract base class, and may provide a lightweight local implementation by default in their module. The goal for all services should be to enable the usage of different implementations (e.g. using cloud storage for image storage), but should not load any module dependencies unless that implementation has been used (i.e. don't import anything that won't be used, especially if it's expensive to import).

## AI Core

The AI Core is represented by the rest of the code base (i.e. the code outside of `/ldm/invoke/app/`).
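As an illustration of the service pattern described in the Services section above, here is a minimal sketch; the class and method names are invented for the example and are not the actual InvokeAI interfaces:

```py
from abc import ABC, abstractmethod

from PIL import Image


class ImageStorageBase(ABC):
    """Abstract interface: invocations depend on this, not on a concrete backend."""

    @abstractmethod
    def get(self, image_type: str, image_name: str) -> Image.Image:
        ...

    @abstractmethod
    def save(self, image_type: str, image_name: str, image: Image.Image) -> None:
        ...


class DiskImageStorage(ImageStorageBase):
    """Lightweight local default; a cloud-backed variant could be swapped in
    without touching any invocation code."""

    def __init__(self, root: str) -> None:
        self._root = root

    def get(self, image_type: str, image_name: str) -> Image.Image:
        return Image.open(f"{self._root}/{image_type}/{image_name}")

    def save(self, image_type: str, image_name: str, image: Image.Image) -> None:
        image.save(f"{self._root}/{image_type}/{image_name}")
```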
docs/contributing/INVOCATIONS.md (new file, 202 lines)
@@ -0,0 +1,202 @@

# Invocations

Invocations represent a single operation, its inputs, and its outputs. These
operations and their outputs can be chained together to generate and modify
images.

## Creating a new invocation

To create a new invocation, either find the appropriate module file in
`/ldm/invoke/app/invocations` to add your invocation to, or create a new one in
that folder. All invocations in that folder will be discovered and made
available to the CLI and API automatically. Invocations make use of
[typing](https://docs.python.org/3/library/typing.html) and
[pydantic](https://pydantic-docs.helpmanual.io/) for validation and integration
into the CLI and API.

An invocation looks like this:

```py
class UpscaleInvocation(BaseInvocation):
    """Upscales an image."""
    type: Literal['upscale'] = 'upscale'

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image")
    strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
    level: Literal[2, 4] = Field(default=2, description="The upscale level")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(self.image.image_type, self.image.image_name)
        results = context.services.generate.upscale_and_reconstruct(
            image_list=[[image, 0]],
            upscale=(self.level, self.strength),
            strength=0.0,  # GFPGAN strength
            save_original=False,
            image_callback=None,
        )

        # Results are image and seed, unwrap for now
        # TODO: can this return multiple results?
        image_type = ImageType.RESULT
        image_name = context.services.images.create_name(context.graph_execution_state_id, self.id)
        context.services.images.save(image_type, image_name, results[0][0])
        return ImageOutput(
            image=ImageField(image_type=image_type, image_name=image_name)
        )
```

Each portion is important to implement correctly.

### Class definition and type

```py
class UpscaleInvocation(BaseInvocation):
    """Upscales an image."""
    type: Literal['upscale'] = 'upscale'
```

All invocations must derive from `BaseInvocation`. They should have a docstring
that declares what they do in a single, short line. They should also have a
`type` with a type hint that's `Literal["command_name"]`, where `command_name`
is what the user will type on the CLI or use in the API to create this
invocation. The `command_name` must be unique. The `type` must be assigned to
the value of the literal in the type hint.

### Inputs

```py
# Inputs
image: Union[ImageField, None] = Field(description="The input image")
strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
level: Literal[2, 4] = Field(default=2, description="The upscale level")
```

Inputs consist of three parts: a name, a type hint, and a `Field` with default,
description, and validation information. For example:

| Part | Value | Description |
| --- | --- | --- |
| Name | `strength` | This field is referred to as `strength` |
| Type Hint | `float` | This field must be of type `float` |
| Field | `Field(default=0.75, gt=0, le=1, description="The strength")` | The default value is `0.75`, the value must be in the range (0,1], and help text will show "The strength" for this field. |

Notice that `image` has type `Union[ImageField, None]`. The `Union` allows this
field to be parsed with `None` as a value, which enables linking to previous
invocations. All fields should either provide a default value or allow `None` as
a value, so that they can be overwritten with a linked output from another
invocation.

The special type `ImageField` is also used here. All images are passed as
`ImageField`, which protects them from pydantic validation errors (since images
only ever come from links).

Finally, note that for all linking, the `type` of the linked fields must match.
If the `name` also matches, then the field can be **automatically linked** to a
matching output of a previous invocation.

### Invoke Function

```py
def invoke(self, context: InvocationContext) -> ImageOutput:
    image = context.services.images.get(self.image.image_type, self.image.image_name)
    results = context.services.generate.upscale_and_reconstruct(
        image_list=[[image, 0]],
        upscale=(self.level, self.strength),
        strength=0.0,  # GFPGAN strength
        save_original=False,
        image_callback=None,
    )

    # Results are image and seed, unwrap for now
    image_type = ImageType.RESULT
    image_name = context.services.images.create_name(context.graph_execution_state_id, self.id)
    context.services.images.save(image_type, image_name, results[0][0])
    return ImageOutput(
        image=ImageField(image_type=image_type, image_name=image_name)
    )
```

The `invoke` function is the last portion of an invocation. It is provided an
`InvocationContext` which contains services to perform work as well as a
`session_id` for use as needed. It should return a class with output values that
derives from `BaseInvocationOutput`.

Before being called, the invocation will have all of its fields set from
defaults, inputs, and finally links (overriding in that order).

Assume that this invocation may be running simultaneously with other
invocations, may be running on another machine, or in other interesting
scenarios. If you need functionality, please provide it as a service in the
`InvocationServices` class, and make sure it can be overridden.

### Outputs

```py
class ImageOutput(BaseInvocationOutput):
    """Base class for invocations that output an image"""
    type: Literal['image'] = 'image'

    image: ImageField = Field(default=None, description="The output image")
```

Output classes look like an invocation class without the invoke method. Prefer
to use an existing output class if available, and prefer to name inputs the same
as outputs when possible, to promote automatic invocation linking.

## Schema Generation

Invocation, output and related classes are used to generate an OpenAPI schema.

### Required Properties

The schema generation treats all properties with default values as optional. This
makes sense internally, but when using these classes via the generated
schema, we end up with e.g. the `ImageOutput` class having its `image` property
marked as optional.

We know that this property will always be present, so the additional logic
needed to always check if the property exists adds a lot of extraneous cruft.

To fix this, we can leverage `pydantic`'s
[schema customisation](https://docs.pydantic.dev/usage/schema/#schema-customization)
to mark properties that we know will always be present as required.

Here's that `ImageOutput` class, without the needed schema customisation:

```python
class ImageOutput(BaseInvocationOutput):
    """Base class for invocations that output an image"""

    type: Literal["image"] = "image"
    image: ImageField = Field(default=None, description="The output image")
```

The generated OpenAPI schema, and all clients/types generated from it, will have
the `type` and `image` properties marked as optional, even though we know they
will always have a value by the time we can interact with them via the API.

Here's the same class, but with the schema customisation added:

```python
class ImageOutput(BaseInvocationOutput):
    """Base class for invocations that output an image"""

    type: Literal["image"] = "image"
    image: ImageField = Field(default=None, description="The output image")

    class Config:
        schema_extra = {
            'required': [
                'type',
                'image',
            ]
        }
```

The resultant schema (and any API client or types generated from it) will now
show `type` as the string literal `"image"` and `image` as an `ImageField`
object.

See this `pydantic` issue for discussion on this solution:
<https://github.com/pydantic/pydantic/discussions/4577>
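A quick way to confirm the customisation took effect is to inspect the generated schema directly. A sketch using the pydantic v1 API; the import path is an assumption, so adjust it to wherever `ImageOutput` lives in your tree:

```py
# Import path is illustrative, not confirmed.
from ldm.invoke.app.invocations.image import ImageOutput

schema = ImageOutput.schema()
# With the Config.schema_extra customisation, both fields are listed as required.
assert set(schema["required"]) == {"type", "image"}
```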
docs/contributing/LOCAL_DEVELOPMENT.md (new file, 83 lines)
@@ -0,0 +1,83 @@

# Local Development

If you are looking to contribute you will need to have a local development
environment. See the
[Developer Install](../installation/020_INSTALL_MANUAL.md#developer-install) for
full details.

Broadly this involves cloning the repository, installing the prerequisites, and
installing InvokeAI in editable form. Assuming this is working, choose your area
of focus.

## Documentation

We use [mkdocs](https://www.mkdocs.org) for our documentation with the
[material theme](https://squidfunk.github.io/mkdocs-material/). Documentation is
written in markdown files under the `./docs` folder and then built into a static
website for hosting with GitHub Pages at
[invoke-ai.github.io/InvokeAI](https://invoke-ai.github.io/InvokeAI).

To contribute to the documentation you'll need to install the dependencies. Note
the use of `"`.

```zsh
pip install ".[docs]"
```

Now run the documentation locally with hot-reloading for any changes made:

```zsh
mkdocs serve
```

You'll then be prompted to connect to `http://127.0.0.1:8080` in order to
access the docs.

## Backend

The backend is contained within the `./invokeai/backend` folder structure. To
get started, install the development dependencies.

From the root of the repository run the following command. Note the use of `"`.

```zsh
pip install ".[test]"
```

This is an optional group of packages which is defined within the
`pyproject.toml` and will be required for testing the changes you make to the
code.

### Running Tests

We use [pytest](https://docs.pytest.org/en/7.2.x/) for our test suite. Tests can
be found under the `./tests` folder and can be run with a single `pytest`
command. Optionally, to review test coverage you can append `--cov`.

```zsh
pytest --cov
```

Test outcomes and coverage will be reported in the terminal. In addition a more
detailed report is created in both XML and HTML format in the `./coverage`
folder. The HTML one in particular can help identify missing statements
requiring tests to ensure coverage. It can be viewed by opening
`./coverage/html/index.html`.

For example:

```zsh
pytest --cov; open ./coverage/html/index.html
```

??? info "HTML coverage report output"

    

    

## Front End

<!--#TODO: get input from blessedcoolant here, for the moment inserted the frontend README via snippets extension.-->

--8<-- "invokeai/frontend/web/README.md"
@@ -214,6 +214,8 @@ Here are the invoke> command that apply to txt2img:
 | `--variation <float>` | `-v<float>` | `0.0` | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with `-S<seed>` and `-n<int>` to generate a series a riffs on a starting image. See [Variations](./VARIATIONS.md). |
 | `--with_variations <pattern>` | | `None` | Combine two or more variations. See [Variations](./VARIATIONS.md) for now to use this. |
 | `--save_intermediates <n>` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory |
+| `--h_symmetry_time_pct <float>` | | `None` | Create symmetry along the X axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) |
+| `--v_symmetry_time_pct <float>` | | `None` | Create symmetry along the Y axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) |
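As an illustration, a command combining the two new flags; the prompt wording and values are made up:

```bash
invoke> a symmetrical art deco facade --h_symmetry_time_pct 0.0001 --v_symmetry_time_pct 0.25
```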

 !!! note

@@ -168,11 +168,15 @@ used by Stable Diffusion 1.4 and 1.5.
 After installation, your `models.yaml` should contain an entry that looks like
 this one:

-inpainting-1.5: weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
-description: SD inpainting v1.5 config:
-configs/stable-diffusion/v1-inpainting-inference.yaml vae:
-models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt width: 512
-height: 512
+```yml
+inpainting-1.5:
+  weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
+  description: SD inpainting v1.5
+  config: configs/stable-diffusion/v1-inpainting-inference.yaml
+  vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
+  width: 512
+  height: 512
+```

 As shown in the example, you may include a VAE fine-tuning weights file as well.
 This is strongly recommended.
@@ -40,7 +40,7 @@ for adj in adjectives:
     print(f'a {adj} day -A{samp} -C{cg}')
 ```

-It's output looks like this (abbreviated):
+Its output looks like this (abbreviated):

 ```bash
 a sunny day -Aklms -C7.5
@@ -268,7 +268,7 @@ model is so good at inpainting, a good substitute is to use the `clipseg` text
 masking option:

 ```bash
-invoke> a fluffy cat eating a hotdot
+invoke> a fluffy cat eating a hotdog
 Outputs:
 [1010] outputs/000025.2182095108.png: a fluffy cat eating a hotdog
 invoke> a smiling dog eating a hotdog -I 000025.2182095108.png -tm cat
@@ -17,7 +17,7 @@ notebooks.

 You will need a GPU to perform training in a reasonable length of
 time, and at least 12 GB of VRAM. We recommend using the [`xformers`
-library](../installation/070_INSTALL_XFORMERS) to accelerate the
+library](../installation/070_INSTALL_XFORMERS.md) to accelerate the
 training process further. During training, about ~8 GB is temporarily
 needed in order to store intermediate models, checkpoints and logs.
@@ -250,6 +250,24 @@ invokeai-ti \
     --only_save_embeds
 ```

+## Using Embeddings
+
+After training completes, the resultant embeddings will be saved into your `$INVOKEAI_ROOT/embeddings/<trigger word>/learned_embeds.bin`.
+
+These will be automatically loaded when you start InvokeAI.
+
+Add the trigger word, surrounded by angle brackets, to use that embedding. For example, if your trigger word was `terence`, use `<terence>` in prompts. This is the same syntax used by the HuggingFace concepts library.
+
+**Note:** `.pt` embeddings do not require the angle brackets.
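For instance, with the `terence` trigger word from the example above, a prompt might look like this (the surrounding wording is illustrative):

```bash
invoke> a watercolor portrait of <terence> wearing a red scarf
```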
+
+## Troubleshooting
+
+### `Cannot load embedding for <trigger>. It was trained on a model with token dimension 1024, but the current model has token dimension 768`
+
+Messages like this indicate you trained the embedding on a different base model than the currently selected one.
+
+For example, in the error above, the training was done on SD2.1 (768x768) but it was used on SD1.5 (512x512).
+
 ## Reading

 For more information on textual inversion, please see the following
(deleted file, 19 lines)
@@ -1,19 +0,0 @@
-<!-- HTML for static distribution bundle build -->
-<!DOCTYPE html>
-<html lang="en">
-<head>
-<meta charset="UTF-8">
-<title>Swagger UI</title>
-<link rel="stylesheet" type="text/css" href="swagger-ui/swagger-ui.css" />
-<link rel="stylesheet" type="text/css" href="swagger-ui/index.css" />
-<link rel="icon" type="image/png" href="swagger-ui/favicon-32x32.png" sizes="32x32" />
-<link rel="icon" type="image/png" href="swagger-ui/favicon-16x16.png" sizes="16x16" />
-</head>
-
-<body>
-<div id="swagger-ui"></div>
-<script src="swagger-ui/swagger-ui-bundle.js" charset="UTF-8"> </script>
-<script src="swagger-ui/swagger-ui-standalone-preset.js" charset="UTF-8"> </script>
-<script src="swagger-ui/swagger-initializer.js" charset="UTF-8"> </script>
-</body>
-</html>
@@ -40,9 +40,10 @@ experimental versions later.
 this, open up a command-line window ("Terminal" on Linux and
 Macintosh, "Command" or "Powershell" on Windows) and type `python
 --version`. If Python is installed, it will print out the version
-number. If it is version `3.9.1` or `3.10.x`, you meet
-requirements.
+number. If it is version `3.9.*` or `3.10.*`, you meet
+requirements. We do not recommend using Python 3.11 or higher,
+as not all the libraries that InvokeAI depends on work properly
+with this version.

 !!! warning "What to do if you have an unsupported version"

@@ -50,8 +51,7 @@ experimental versions later.
 and download the appropriate installer package for your
 platform. We recommend [Version
 3.10.9](https://www.python.org/downloads/release/python-3109/),
-which has been extensively tested with InvokeAI. At this time
-we do not recommend Python 3.11.
+which has been extensively tested with InvokeAI.

 _Please select your platform in the section below for platform-specific
 setup requirements._
@@ -150,7 +150,7 @@ experimental versions later.

 ```cmd
 C:\Documents\Linco> cd InvokeAI-Installer
-C:\Documents\Linco\invokeAI> install.bat
+C:\Documents\Linco\invokeAI> .\install.bat
 ```

 7. **Select the location to install InvokeAI**: The script will ask you to choose where to install InvokeAI. Select a

@@ -167,6 +167,11 @@ experimental versions later.
 `/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai`
 on Macintoshes, where "YourName" is your login name.

+- If you have previously installed InvokeAI, you will be asked to
+  confirm whether you want to reinstall into this directory. You
+  may choose to reinstall, in which case your version will be upgraded,
+  or choose a different directory.
+
 - The script uses tab autocompletion to suggest directory path completions.
   Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
   to suggest completions.

@@ -181,11 +186,6 @@ experimental versions later.
 are unsure what GPU you are using, you can ask the installer to
 guess.

-<figure markdown>
-
-</figure>

 9. **Watch it go!**: Sit back and let the install script work. It will install the third-party
    libraries needed by InvokeAI and the application itself.
@@ -197,25 +197,141 @@ experimental versions later.
 minutes and nothing is happening, you can interrupt the script with ^C. You
 may restart it and it will pick up where it left off.

-10. **Post-install Configuration**: After installation completes, the installer will launch the
-    configuration script, which will guide you through the first-time
-    process of selecting one or more Stable Diffusion model weights
-    files, downloading and configuring them. We provide a list of
-    popular models that InvokeAI performs well with. However, you can
-    add more weight files later on using the command-line client or
-    the Web UI. See [Installing Models](050_INSTALLING_MODELS.md) for
-    details.

 <figure markdown>
 
 </figure>

-If you have already downloaded the weights file(s) for another Stable
-Diffusion distribution, you may skip this step (by selecting "skip" when
-prompted) and configure InvokeAI to use the previously-downloaded files. The
-process for this is described in [Installing Models](050_INSTALLING_MODELS.md).
-
-11. **Running InvokeAI for the first time**: The script will now exit and you'll be ready to generate some images. Look
+10. **Post-install Configuration**: After installation completes, the
+    installer will launch the configuration form, which will guide you
+    through the first-time process of adjusting some of InvokeAI's
+    startup settings. To move around this form use ctrl-N for
+    <N>ext and ctrl-P for <P>revious, or use <tab>
+    and shift-<tab> to move forward and back. Once you are in a
+    multi-checkbox field use the up and down cursor keys to select the
+    item you want, and <space> to toggle it on and off. Within
+    a directory field, pressing <tab> will provide autocomplete
+    options.
+
+    Generally the defaults are fine, and you can come back to this screen at
+    any time to tweak your system. Here are the options you can adjust:
+
+    - ***Output directory for images***
+      This is the path to a directory in which InvokeAI will store all its
+      generated images.
+
+    - ***NSFW checker***
+      If checked, InvokeAI will test images for potential sexual content
+      and blur them out if found. Note that the NSFW checker consumes
+      an additional 0.6 GB of VRAM on top of the 2-3 GB of VRAM used
+      by most image models. If you have a low VRAM GPU (4-6 GB), you
+      can reduce out of memory errors by disabling the checker.
+
+    - ***HuggingFace Access Token***
+      InvokeAI has the ability to download embedded styles and subjects
+      from the HuggingFace Concept Library on-demand. However, some of
+      the concept library files are password protected. To make download
+      smoother, you can set up an account at huggingface.co, obtain an
+      access token, and paste it into this field. Note that you paste
+      to this screen using ctrl-shift-V.
+
+    - ***Free GPU memory after each generation***
+      This is useful for low-memory machines and helps minimize the
+      amount of GPU VRAM used by InvokeAI.
+
+    - ***Enable xformers support if available***
+      If the xformers library was successfully installed, this will activate
+      it to reduce memory consumption and increase rendering speed noticeably.
+      Note that xformers has the side effect of generating slightly different
+      images even when presented with the same seed and other settings.
+
+    - ***Force CPU to be used on GPU systems***
+      This will use the (slow) CPU rather than the accelerated GPU. This
+      can be used to generate images on systems that don't have a compatible
+      GPU.
+
+    - ***Precision***
+      This controls whether to use float32 or float16 arithmetic.
+      float16 uses less memory but is also slightly less accurate.
+      Ordinarily the right arithmetic is picked automatically ("auto"),
+      but you may have to use float32 to get images on certain systems
+      and graphics cards. The "autocast" option is deprecated and
+      shouldn't be used unless you are asked to by a member of the team.
+
+    - ***Number of models to cache in CPU memory***
+      This allows you to keep models in memory and switch rapidly among
+      them rather than having them load from disk each time. This slider
+      controls how many models to keep loaded at once. Each
+      model will use 2-4 GB of RAM, so use this cautiously.
+
+    - ***Directory containing embedding/textual inversion files***
+      This is the directory in which you can place custom embedding
+      files (.pt or .bin). During startup, this directory will be
+      scanned and InvokeAI will print out the text terms that
+      are available to trigger the embeddings.
+
+    At the bottom of the screen you will see a checkbox for accepting
+    the CreativeML Responsible AI License. You need to accept the license
+    in order to download Stable Diffusion models from the next screen.
+
+    _You can come back to the startup options form_ as many times as you like.
+    From the `invoke.sh` or `invoke.bat` launcher, select option (6) to relaunch
+    this script. On the command line, it is named `invokeai-configure`.
+
+11. **Downloading Models**: After you press `[NEXT]` on the screen, you will be taken
+    to another screen that prompts you to download a series of starter models. The ones
+    we recommend are preselected for you, but you are encouraged to use the checkboxes to
+    pick and choose.
+    You will probably wish to download `autoencoder-840000` for use with models that
+    were trained with an older version of the Stability VAE.
+
+    <figure markdown>
+    
+    </figure>
+
+    Below the preselected list of starter models is a large text field which you can use
+    to specify a series of models to import. You can specify models in a variety of formats,
+    each separated by a space or newline. The formats accepted are listed below, with
+    sample entries shown after the list:
+
+    - The path to a .ckpt or .safetensors file. On most systems, you can drag a file from
+      the file browser to the textfield to automatically paste the path. Be sure to remove
+      extraneous quotation marks and other things that come along for the ride.
+
+    - The path to a directory containing a combination of `.ckpt` and `.safetensors` files.
+      The directory will be scanned from top to bottom (including subfolders) and any
+      file that can be imported will be.
+
+    - A URL pointing to a `.ckpt` or `.safetensors` file. You can cut
+      and paste directly from a web page, or simply drag the link from the web page
+      or navigation bar. (You can also use ctrl-shift-V to paste into this field)
+      The file will be downloaded and installed.
+
+    - The HuggingFace repository ID (repo_id) for a `diffusers` model. These IDs have
+      the format _author_name/model_name_, as in `andite/anything-v4.0`
+
+    - The path to a local directory containing a `diffusers`
+      model. These directories always have the file `model_index.json`
+      at their top level.
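To illustrate, the import field might contain entries like the following; the URL and file paths are placeholders, while the repo_id is the one mentioned above:

```
andite/anything-v4.0
https://example.com/models/some-model.safetensors
C:\Users\YourName\Downloads\another-model.ckpt
/home/YourName/diffusers-models/my-model/
```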
+    _Select a directory for models to import_ You may select a local
+    directory for autoimporting at startup time. If you select this
+    option, the directory you choose will be scanned for new
+    .ckpt/.safetensors files each time InvokeAI starts up, and any new
+    files will be automatically imported and made available for your
+    use.
+
+    _Convert imported models into diffusers_ When legacy checkpoint
+    files are imported, you may select to use them unmodified (the
+    default) or to convert them into `diffusers` models. The latter
+    load much faster and have slightly better rendering performance,
+    but not all checkpoint files can be converted. Note that Stable Diffusion
+    Version 2.X files are **only** supported in `diffusers` format and will
+    be converted regardless.
+
+    _You can come back to the model install form_ as many times as you like.
+    From the `invoke.sh` or `invoke.bat` launcher, select option (5) to relaunch
+    this script. On the command line, it is named `invokeai-model-install`.
+
+12. **Running InvokeAI for the first time**: The script will now exit and you'll be ready to generate some images. Look
 for the directory `invokeai` installed in the location you chose at the
 beginning of the install session. Look for a shell script named `invoke.sh`
 (Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking
@ -301,7 +417,7 @@ Then type the following commands:
|
|||||||
|
|
||||||
=== "AMD System"
|
=== "AMD System"
|
||||||
```bash
|
```bash
|
||||||
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.2
|
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
||||||
```
|
```
|
||||||
|
|
||||||
### Corrupted configuration file
|
### Corrupted configuration file
|
||||||
@@ -327,6 +443,52 @@ the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
 visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
 assistance.
+
+### Out of Memory Issues
+
+The models are large, VRAM is expensive, and you may find yourself
+faced with Out of Memory errors when generating images. Here are some
+tips to reduce the problem:
+
+* **4 GB of VRAM**
+
+This should be adequate for 512x512 pixel images using Stable Diffusion 1.5
+and derived models, provided that you **disable** the NSFW checker. To
+disable the filter, do one of the following:
+
+* Select option (6) "_change InvokeAI startup options_" from the
+launcher. This will bring up the console-based startup settings
+dialogue and allow you to unselect the "NSFW Checker" option.
+* Start the startup settings dialogue directly by running
+`invokeai-configure --skip-sd-weights --skip-support-models`
+from the command line.
+* Find the `invokeai.init` initialization file in the InvokeAI root
+directory, open it in a text editor, and change `--nsfw_checker`
+to `--no-nsfw_checker`.
+
+If you are on a CUDA system, you can realize significant memory
+savings by activating the `xformers` library as described above. The
+downside is that `xformers` introduces non-deterministic behavior, such
+that images generated with exactly the same prompt and settings will
+be slightly different from each other. See above for more information.
+
+* **6 GB of VRAM**
+
+This is a border case. Using the SD 1.5 series you should be able to
+generate images up to 640x640 with the NSFW checker enabled, and up to
+1024x1024 with it disabled and `xformers` activated.
+
+If you run into persistent memory issues there are a series of
+environment variables that you can set before launching InvokeAI that
+alter how the PyTorch machine learning library manages memory. See
+https://pytorch.org/docs/stable/notes/cuda.html#memory-management for
+a list of these tweaks (a worked example appears just below).
+
+* **12 GB of VRAM**
+
+This should be sufficient to generate larger images up to about
+1280x1280. If you wish to push further, consider activating
+`xformers`.

 ### Other Problems

 If you run into problems during or after installation, the InvokeAI team is
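As a worked example of the memory tips in the section above: the allocator setting is a generic PyTorch environment variable rather than an InvokeAI-specific flag, and the `512` value is illustrative only, not a recommendation from this changeset.

```bash
# Reduce CUDA memory fragmentation before launching (illustrative value).
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512

# To disable the NSFW checker, make sure this line appears in the
# invokeai.init file in your InvokeAI root directory:
#     --no-nsfw_checker

./invoke.sh
```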
@@ -348,25 +510,11 @@ version (recommended), follow these steps:

 1. Start the `invoke.sh`/`invoke.bat` launch script from within the
 `invokeai` root directory.

-2. Choose menu item (6) "Developer's Console". This will launch a new
-command line.
-
-3. Type the following command:
-
-```bash
-pip install InvokeAI --upgrade
-```
-4. Watch the installation run. Once it is complete, you may exit the
-command line by typing `exit`, and then start InvokeAI from the
-launch script as per usual.
-
-
-Alternatively, if you wish to get the most recent unreleased
-development version, perform the same steps to enter the developer's
-console, and then type:
-
-```bash
-pip install https://github.com/invoke-ai/InvokeAI/archive/refs/heads/main.zip
-```
-
+2. Choose menu item (10) "Update InvokeAI".
+
+3. This will launch a menu that gives you the option of:
+
+1. Updating to the latest official release;
+2. Updating to the bleeding-edge development version; or
+3. Manually entering the tag or branch name of a version of
+InvokeAI you wish to try out.
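For those who prefer to skip the menu, the updater added in this changeset can also be run directly from the developer's console; a hedged sketch, assuming the `invokeai-update` script referenced elsewhere in this diff is on the PATH inside the virtual environment:

```bash
source .venv/bin/activate   # from within the invokeai root directory
invokeai-update             # choose release, development, or a specific tag/branch
```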
@@ -30,25 +30,35 @@ Installation](010_INSTALL_AUTOMATED.md), and in many cases will
 already be installed (if, for example, you have used your system for
 gaming):

-* **Python** version 3.9 or 3.10 (3.11 is not recommended).
-
-* **CUDA Tools** For those with _NVidia GPUs_, you will need to
-install the [CUDA toolkit and optionally the XFormers library](070_INSTALL_XFORMERS.md).
-
-* **ROCm Tools** For _Linux users with AMD GPUs_, you will need
-to install the [ROCm toolkit](./030_INSTALL_CUDA_AND_ROCM.md). Note that
-InvokeAI does not support AMD GPUs on Windows systems due to
-lack of a Windows ROCm library.
-
-* **Visual C++ Libraries** _Windows users_ must install the free
-[Visual C++ libraries from Microsoft](https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170)
-
-* **The Xcode command line tools** for _Macintosh users_. Instructions are
-available at [Free Code Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
-
-* _Macintosh users_ may also need to run the `Install Certificates` command
-if model downloads give lots of certificate errors. Run:
-`/Applications/Python\ 3.10/Install\ Certificates.command`
+* **Python**
+
+version 3.9 or 3.10 (3.11 is not recommended).
+
+* **CUDA Tools**
+
+For those with _NVidia GPUs_, you will need to
+install the [CUDA toolkit and optionally the XFormers library](070_INSTALL_XFORMERS.md).
+
+* **ROCm Tools**
+
+For _Linux users with AMD GPUs_, you will need
+to install the [ROCm toolkit](./030_INSTALL_CUDA_AND_ROCM.md). Note that
+InvokeAI does not support AMD GPUs on Windows systems due to
+lack of a Windows ROCm library.
+
+* **Visual C++ Libraries**
+
+_Windows users_ must install the free
+[Visual C++ libraries from Microsoft](https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170)
+
+* **The Xcode command line tools**
+
+for _Macintosh users_. Instructions are available at
+[Free Code Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
+
+* _Macintosh users_ may also need to run the `Install Certificates` command
+if model downloads give lots of certificate errors. Run:
+`/Applications/Python\ 3.10/Install\ Certificates.command`

 ### Installation Walkthrough

@@ -75,7 +85,7 @@ manager, please follow these steps:
 === "Linux/Mac"

 ```bash
-export INVOKEAI_ROOT="~/invokeai"
+export INVOKEAI_ROOT=~/invokeai
 mkdir $INVOKEAI_ROOT
 ```

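A side note on why the quotation marks were removed in the hunk above: the shell does not perform tilde expansion inside double quotes, so the quoted form stores a literal `~` and the subsequent `mkdir` creates a directory actually named `~`. A quick demonstration you can run in any bash-like shell:

```bash
QUOTED="~/invokeai"
UNQUOTED=~/invokeai
echo "$QUOTED"    # prints: ~/invokeai  (literal tilde, not your home directory)
echo "$UNQUOTED"  # prints: /home/<your-user>/invokeai
```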
@@ -99,35 +109,30 @@ manager, please follow these steps:
 Windows environment variable using the Advanced System Settings dialogue.
 Refer to your operating system documentation for details.

-=== "Linux/Mac"
-```bash
-cd $INVOKEAI_ROOT
-python -m venv create .venv
-```
-
-=== "Windows"
-```bash
-cd $INVOKEAI_ROOT
-python -m venv create .venv
-```
+```terminal
+cd $INVOKEAI_ROOT
+python -m venv .venv --prompt InvokeAI
+```

 4. Activate the new environment:

 === "Linux/Mac"
-```bash
+
+```bash
 source .venv/bin/activate
 ```

 === "Windows"
-```bash
-.venv\script\activate
-```
-If you get a permissions error at this point, run the command
-`Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Scope CurrentUser`
-and try `activate` again.
-
-The command-line prompt should change to to show `(.venv)` at the
+
+```ps
+.venv\Scripts\activate
+```
+
+If you get a permissions error at this point, run this command and try again
+
+`Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser`
+
+The command-line prompt should change to show `(InvokeAI)` at the
 beginning of the prompt. Note that all the following steps should be
 run while inside the INVOKEAI_ROOT directory

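Before continuing, it is worth verifying that the activation really took; this quick check works identically on Linux, Mac and Windows:

```bash
python -c "import sys; print(sys.prefix)"
# Expect a path ending in .venv inside your INVOKEAI_ROOT directory;
# a system-wide path means the environment is not active.
```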
@@ -137,40 +142,47 @@ manager, please follow these steps:
 python -m pip install --upgrade pip
 ```

-6. Install the InvokeAI Package. The `--extra-index-url` option is used to select among CUDA, ROCm and CPU/MPS drivers as shown below:
+6. Install the InvokeAI Package. The `--extra-index-url` option is used to select among
+CUDA, ROCm and CPU/MPS drivers as shown below:

 === "CUDA (NVidia)"

 ```bash
-pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
+pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
 ```

 === "ROCm (AMD)"

 ```bash
-pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
+pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
 ```

 === "CPU (Intel Macs & non-GPU systems)"

 ```bash
 pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
 ```

 === "MPS (M1 and M2 Macs)"

 ```bash
-pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
+pip install InvokeAI --use-pep517
 ```

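One detail worth spelling out in the CUDA hunk above: the added quotes around `"InvokeAI[xformers]"` are functional, not cosmetic. In `zsh` (the default shell on recent macOS) an unquoted `[xformers]` is interpreted as a filename glob pattern, and the command aborts with a "no matches found" error; quoting passes the extras marker through to pip unchanged:

```bash
# Fails under zsh: "zsh: no matches found: InvokeAI[xformers]"
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117

# Works under both bash and zsh:
pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
```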
 7. Deactivate and reactivate your runtime directory so that the invokeai-specific commands
 become available in the environment

 === "Linux/Macintosh"

 ```bash
 deactivate && source .venv/bin/activate
 ```

 === "Windows"
-```bash
+
+```ps
 deactivate
 .venv\Scripts\activate
 ```

 8. Set up the runtime directory
@@ -179,7 +191,7 @@ manager, please follow these steps:
 models, model config files, directory for textual inversion embeddings, and
 your outputs.

-```bash
+```terminal
 invokeai-configure
 ```

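If `INVOKEAI_ROOT` is not set in the shell where you run this step, the configure script can be pointed at the runtime directory explicitly with the `--root` flag that appears elsewhere in this changeset:

```bash
invokeai-configure --root ~/invokeai
```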
@@ -283,13 +295,12 @@ on your system, please see the [Git Installation
 Guide](https://github.com/git-guides/install-git)

 1. From the command line, run this command:

 ```bash
 git clone https://github.com/invoke-ai/InvokeAI.git
 ```

 This will create a directory named `InvokeAI` and populate it with the
 full source code from the InvokeAI repository.

 2. Activate the InvokeAI virtual environment as per step (4) of the manual
 installation protocol (important!)
@@ -304,7 +315,7 @@ installation protocol (important!)

 === "ROCm (AMD)"
 ```bash
-pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
+pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
 ```

 === "CPU (Intel Macs & non-GPU systems)"
@@ -314,7 +325,7 @@ installation protocol (important!)

 === "MPS (M1 and M2 Macs)"
 ```bash
-pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
+pip install -e . --use-pep517
 ```

 Be sure to pass `-e` (for an editable install) and don't forget the
@@ -330,5 +341,29 @@ installation protocol (important!)
 repository. You can then use GitHub functions to create and submit
 pull requests to contribute improvements to the project.

-Please see [Contributing](/index.md#Contributing) for hints
+Please see [Contributing](../index.md#contributing) for hints
 on getting started.

+### Unsupported Conda Install
+
+Congratulations, you found the "secret" Conda installation
+instructions. If you really **really** want to use Conda with InvokeAI
+you can do so using this unsupported recipe:
+
+```
+mkdir ~/invokeai
+conda create -n invokeai python=3.10
+conda activate invokeai
+pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
+invokeai-configure --root ~/invokeai
+invokeai --root ~/invokeai --web
+```
+
+The `pip install` command shown in this recipe is for Linux/Windows
+systems with an NVIDIA GPU. See step (6) above for the command to use
+with other platforms/GPU combinations. If you don't wish to pass the
+`--root` argument to `invokeai` with each launch, you may set the
+environment variable INVOKEAI_ROOT to point to the installation directory.
+
+Note that if you run into problems with the Conda installation, the InvokeAI
+staff will **not** be able to help you out. Caveat Emptor!
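To illustrate the last tip in the recipe above, setting the environment variable once removes the need for `--root` on every launch (bash/zsh syntax shown):

```bash
export INVOKEAI_ROOT=~/invokeai   # add to ~/.bashrc or ~/.zshrc to persist
invokeai --web                    # no --root argument needed now
```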
@@ -110,7 +110,7 @@ recipes are available

 When installing torch and torchvision manually with `pip`, remember to provide
 the argument `--extra-index-url
-https://download.pytorch.org/whl/rocm5.2` as described in the [Manual
+https://download.pytorch.org/whl/rocm5.4.2` as described in the [Manual
 Installation Guide](020_INSTALL_MANUAL.md).

 This will be done automatically for you if you use the installer

@@ -43,25 +43,31 @@ InvokeAI comes with support for a good set of starter models. You'll
 find them listed in the master models file
 `configs/INITIAL_MODELS.yaml` in the InvokeAI root directory. The
 subset that are currently installed are found in
-`configs/models.yaml`. The current list is:
+`configs/models.yaml`. As of v2.3.1, the list of starter models is:

-| Model | HuggingFace Repo ID | Description | URL |
-| -------------------- | --------------------------------- | ---------------------------------------------------------- | -------------------------------------------------------------- |
-| stable-diffusion-1.5 | runwayml/stable-diffusion-v1-5 | Most recent version of base Stable Diffusion model | https://huggingface.co/runwayml/stable-diffusion-v1-5 |
-| stable-diffusion-1.4 | runwayml/stable-diffusion-v1-4 | Previous version of base Stable Diffusion model | https://huggingface.co/runwayml/stable-diffusion-v1-4 |
-| inpainting-1.5 | runwayml/stable-diffusion-inpainting | Stable diffusion 1.5 optimized for inpainting | https://huggingface.co/runwayml/stable-diffusion-inpainting |
-| stable-diffusion-2.1-base |stabilityai/stable-diffusion-2-1-base | Stable Diffusion version 2.1 trained on 512 pixel images | https://huggingface.co/stabilityai/stable-diffusion-2-1-base |
-| stable-diffusion-2.1-768 |stabilityai/stable-diffusion-2-1 | Stable Diffusion version 2.1 trained on 768 pixel images | https://huggingface.co/stabilityai/stable-diffusion-2-1 |
-| dreamlike-diffusion-1.0 | dreamlike-art/dreamlike-diffusion-1.0 | An SD 1.5 model finetuned on high quality art | https://huggingface.co/dreamlike-art/dreamlike-diffusion-1.0 |
-| dreamlike-photoreal-2.0 | dreamlike-art/dreamlike-photoreal-2.0 | A photorealistic model trained on 768 pixel images | https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0 |
-| openjourney-4.0 | prompthero/openjourney | An SD 1.5 model finetuned on Midjourney images prompt with "mdjrny-v4 style" | https://huggingface.co/prompthero/openjourney |
-| nitro-diffusion-1.0 | nitrosocke/Nitro-Diffusion | An SD 1.5 model finetuned on three styles, prompt with "archer style", "arcane style" or "modern disney style" | https://huggingface.co/nitrosocke/Nitro-Diffusion |
-| trinart-2.0 | naclbit/trinart_stable_diffusion_v2 | An SD 1.5 model finetuned with ~40,000 assorted high resolution manga/anime-style pictures | https://huggingface.co/naclbit/trinart_stable_diffusion_v2 |
-| trinart-characters-2_0 | naclbit/trinart_derrida_characters_v2_stable_diffusion | An SD 1.5 model finetuned with 19.2M manga/anime-style pictures | https://huggingface.co/naclbit/trinart_derrida_characters_v2_stable_diffusion |
+|Model Name | HuggingFace Repo ID | Description | URL |
+|---------- | ---------- | ----------- | --- |
+|stable-diffusion-1.5|runwayml/stable-diffusion-v1-5|Stable Diffusion version 1.5 diffusers model (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-v1-5 |
+|sd-inpainting-1.5|runwayml/stable-diffusion-inpainting|RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-inpainting |
+|stable-diffusion-2.1|stabilityai/stable-diffusion-2-1|Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-1 |
+|sd-inpainting-2.0|stabilityai/stable-diffusion-2-inpainting|Stable Diffusion version 2.0 inpainting model (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-inpainting |
+|analog-diffusion-1.0|wavymulder/Analog-Diffusion|An SD-1.5 model trained on diverse analog photographs (2.13 GB)|https://huggingface.co/wavymulder/Analog-Diffusion |
+|deliberate-1.0|XpucT/Deliberate|Versatile model that produces detailed images up to 768px (4.27 GB)|https://huggingface.co/XpucT/Deliberate |
+|d&d-diffusion-1.0|0xJustin/Dungeons-and-Diffusion|Dungeons & Dragons characters (2.13 GB)|https://huggingface.co/0xJustin/Dungeons-and-Diffusion |
+|dreamlike-photoreal-2.0|dreamlike-art/dreamlike-photoreal-2.0|A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)|https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0 |
+|inkpunk-1.0|Envvi/Inkpunk-Diffusion|Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)|https://huggingface.co/Envvi/Inkpunk-Diffusion |
+|openjourney-4.0|prompthero/openjourney|An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)|https://huggingface.co/prompthero/openjourney |
+|portrait-plus-1.0|wavymulder/portraitplus|An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB)|https://huggingface.co/wavymulder/portraitplus |
+|seek-art-mega-1.0|coreco/seek.art_MEGA|A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)|https://huggingface.co/coreco/seek.art_MEGA |
+|trinart-2.0|naclbit/trinart_stable_diffusion_v2|An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)|https://huggingface.co/naclbit/trinart_stable_diffusion_v2 |
+|waifu-diffusion-1.4|hakurei/waifu-diffusion|An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)|https://huggingface.co/hakurei/waifu-diffusion |

-Note that these files are covered by an "Ethical AI" license which forbids
-certain uses. When you initially download them, you are asked to
-accept the license terms.
+Note that these files are covered by an "Ethical AI" license which
+forbids certain uses. When you initially download them, you are asked
+to accept the license terms. In addition, some of these models carry
+additional license terms that limit their use in commercial
+applications or on public servers. Be sure to familiarize yourself
+with the model terms by visiting the URLs in the table above.

 ## Community-Contributed Models

@@ -80,6 +86,13 @@ only `.safetensors` and `.ckpt` models, but they can be easily loaded
 into InvokeAI and/or converted into optimized `diffusers` models. Be
 aware that CIVITAI hosts many models that generate NSFW content.

+!!! note
+
+InvokeAI 2.3.x does not support directly importing and
+running Stable Diffusion version 2 checkpoint models. You may instead
+convert them into `diffusers` models using the conversion methods
+described below.
+
 ## Installation

 There are multiple ways to install and manage models:
@@ -90,7 +103,7 @@ There are multiple ways to install and manage models:
 models files.

 3. The web interface (WebUI) has a GUI for importing and managing
 models.

 ### Installation via `invokeai-configure`

@@ -106,7 +119,7 @@ confirm that the files are complete.
 You can install a new model, including any of the community-supported ones, via
 the command-line client's `!import_model` command.

-#### Installing `.ckpt` and `.safetensors` models
+#### Installing individual `.ckpt` and `.safetensors` models

 If the model is already downloaded to your local disk, use
 `!import_model /path/to/file.ckpt` to load it. For example:
@@ -131,15 +144,40 @@ invoke> !import_model https://example.org/sd_models/martians.safetensors
 For this to work, the URL must not be password-protected. Otherwise
 you will receive a 404 error.

-When you import a legacy model, the CLI will ask you a few questions
-about the model, including what size image it was trained on (usually
-512x512), what name and description you wish to use for it, what
-configuration file to use for it (usually the default
-`v1-inference.yaml`), whether you'd like to make this model the
-default at startup time, and whether you would like to install a
-custom VAE (variable autoencoder) file for the model. For recent
-models, the answer to the VAE question is usually "no," but it won't
-hurt to answer "yes".
+When you import a legacy model, the CLI will first ask you what type
+of model this is. You can indicate whether it is a model based on
+Stable Diffusion 1.x (1.4 or 1.5), one based on Stable Diffusion 2.x,
+or a 1.x inpainting model. Be careful to indicate the correct model
+type, or it will not load correctly. You can correct the model type
+after the fact using the `!edit_model` command.
+
+The system will then ask you a few other questions about the model,
+including what size image it was trained on (usually 512x512), what
+name and description you wish to use for it, and whether you would
+like to install a custom VAE (variable autoencoder) file for the
+model. For recent models, the answer to the VAE question is usually
+"no," but it won't hurt to answer "yes".
+
+After importing, the model will load. If this is successful, you will
+be asked if you want to keep the model loaded in memory to start
+generating immediately. You'll also be asked if you wish to make this
+the default model on startup. You can change this later using
+`!edit_model`.
+
+#### Importing a batch of `.ckpt` and `.safetensors` models from a directory
+
+You may also point `!import_model` to a directory containing a set of
+`.ckpt` or `.safetensors` files. They will be imported _en masse_.
+
+!!! example
+
+```console
+invoke> !import_model C:/Users/fred/Downloads/civitai_models/
+```
+
+You will be given the option to import all models found in the
+directory, or select which ones to import. If there are subfolders
+within the directory, they will be searched for models to import.

 #### Installing `diffusers` models

@@ -279,19 +317,23 @@ After you save the modified `models.yaml` file relaunch
 ### Installation via the WebUI

 To access the WebUI Model Manager, click on the button that looks like
-a cute in the upper right side of the browser screen. This will bring
+a cube in the upper right side of the browser screen. This will bring
 up a dialogue that lists the models you have already installed, and
 allows you to load, delete or edit them:

 <figure markdown>

 

 </figure>

 To add a new model, click on **+ Add New** and select to either a
 checkpoint/safetensors model, or a diffusers model:

 <figure markdown>

 

 </figure>

 In this example, we chose **Add Diffusers**. As shown in the figure
@@ -302,7 +344,9 @@ choose to enter a path to disk, the system will autocomplete for you
 as you type:

 <figure markdown>

 

 </figure>

 Press **Add Model** at the bottom of the dialogue (scrolled out of
@@ -317,7 +361,9 @@ directory and press the "Search" icon. This will display the
 subfolders, and allow you to choose which ones to import:

 <figure markdown>

 

 </figure>

 ## Model Management Startup Options
@@ -342,9 +388,8 @@ invoke.sh --autoconvert /home/fred/stable-diffusion-checkpoints

 And here is what the same argument looks like in `invokeai.init`:

-```
+```bash
 --outdir="/home/fred/invokeai/outputs
 --no-nsfw_checker
 --autoconvert /home/fred/stable-diffusion-checkpoints
 ```

@@ -24,7 +24,7 @@ You need to have opencv installed so that pypatchmatch can be built:
 brew install opencv
 ```

-The next time you start `invoke`, after sucesfully installing opencv, pypatchmatch will be built.
+The next time you start `invoke`, after successfully installing opencv, pypatchmatch will be built.

 ## Linux

@@ -56,7 +56,7 @@ Prior to installing PyPatchMatch, you need to take the following steps:

 5. Confirm that pypatchmatch is installed. At the command-line prompt enter
 `python`, and then at the `>>>` line type
-`from patchmatch import patch_match`: It should look like the follwing:
+`from patchmatch import patch_match`: It should look like the following:

 ```py
 Python 3.9.5 (default, Nov 23 2021, 15:27:38)
@@ -108,4 +108,4 @@ Prior to installing PyPatchMatch, you need to take the following steps:

 [**Next, Follow Steps 4-6 from the Debian Section above**](#linux)

-If you see no errors, then you're ready to go!
+If you see no errors, you're ready to go!

@@ -1,73 +0,0 @@
-openapi: 3.0.3
-info:
-  title: Stable Diffusion
-  description: |-
-    TODO: Description Here
-
-    Some useful links:
-    - [Stable Diffusion Dream Server](https://github.com/lstein/stable-diffusion)
-
-  license:
-    name: MIT License
-    url: https://github.com/lstein/stable-diffusion/blob/main/LICENSE
-  version: 1.0.0
-servers:
-  - url: http://localhost:9090/api
-tags:
-  - name: images
-    description: Retrieve and manage generated images
-paths:
-  /images/{imageId}:
-    get:
-      tags:
-        - images
-      summary: Get image by ID
-      description: Returns a single image
-      operationId: getImageById
-      parameters:
-        - name: imageId
-          in: path
-          description: ID of image to return
-          required: true
-          schema:
-            type: string
-      responses:
-        '200':
-          description: successful operation
-          content:
-            image/png:
-              schema:
-                type: string
-                format: binary
-        '404':
-          description: Image not found
-  /intermediates/{intermediateId}/{step}:
-    get:
-      tags:
-        - images
-      summary: Get intermediate image by ID
-      description: Returns a single intermediate image
-      operationId: getIntermediateById
-      parameters:
-        - name: intermediateId
-          in: path
-          description: ID of intermediate to return
-          required: true
-          schema:
-            type: string
-        - name: step
-          in: path
-          description: The generation step of the intermediate
-          required: true
-          schema:
-            type: string
-      responses:
-        '200':
-          description: successful operation
-          content:
-            image/png:
-              schema:
-                type: string
-                format: binary
-        '404':
-          description: Intermediate not found

docs/other/TRANSLATION.md (new file)
@@ -0,0 +1,19 @@
+# Translation
+
+InvokeAI uses [Weblate](https://weblate.org) for translation. Weblate is a FOSS project providing a scalable translation service. Weblate automates the tedious parts of managing translation of a growing project, and the service is generously provided at no cost to FOSS projects like InvokeAI.
+
+## Contributing
+
+If you'd like to contribute by adding or updating a translation, please visit our [Weblate project](https://hosted.weblate.org/engage/invokeai/). You'll need to sign in with your GitHub account (a number of other accounts are supported, including Google).
+
+Once signed in, select a language and then the Web UI component. From here you can Browse and Translate strings from English to your chosen language. Zen mode offers a simpler translation experience.
+
+Your changes will be attributed to you in the automated PR process; you don't need to do anything else.
+
+## Help & Questions
+
+Please check Weblate's [documentation](https://docs.weblate.org/en/latest/index.html) or ping @psychedelicious or @blessedcoolant on Discord if you have any questions.
+
+## Thanks
+
+Thanks to the InvokeAI community for their efforts to translate the project!
(Two binary image files deleted: 665 B and 628 B.)

@@ -1,16 +0,0 @@
-html {
-  box-sizing: border-box;
-  overflow: -moz-scrollbars-vertical;
-  overflow-y: scroll;
-}
-
-*,
-*:before,
-*:after {
-  box-sizing: inherit;
-}
-
-body {
-  margin: 0;
-  background: #fafafa;
-}
@@ -1,79 +0,0 @@
-<!doctype html>
-<html lang="en-US">
-<head>
-  <title>Swagger UI: OAuth2 Redirect</title>
-</head>
-<body>
-<script>
-  'use strict';
-  function run () {
-    var oauth2 = window.opener.swaggerUIRedirectOauth2;
-    var sentState = oauth2.state;
-    var redirectUrl = oauth2.redirectUrl;
-    var isValid, qp, arr;
-
-    if (/code|token|error/.test(window.location.hash)) {
-      qp = window.location.hash.substring(1).replace('?', '&');
-    } else {
-      qp = location.search.substring(1);
-    }
-
-    arr = qp.split("&");
-    arr.forEach(function (v,i,_arr) { _arr[i] = '"' + v.replace('=', '":"') + '"';});
-    qp = qp ? JSON.parse('{' + arr.join() + '}',
-      function (key, value) {
-        return key === "" ? value : decodeURIComponent(value);
-      }
-    ) : {};
-
-    isValid = qp.state === sentState;
-
-    if ((
-      oauth2.auth.schema.get("flow") === "accessCode" ||
-      oauth2.auth.schema.get("flow") === "authorizationCode" ||
-      oauth2.auth.schema.get("flow") === "authorization_code"
-    ) && !oauth2.auth.code) {
-      if (!isValid) {
-        oauth2.errCb({
-          authId: oauth2.auth.name,
-          source: "auth",
-          level: "warning",
-          message: "Authorization may be unsafe, passed state was changed in server. The passed state wasn't returned from auth server."
-        });
-      }
-
-      if (qp.code) {
-        delete oauth2.state;
-        oauth2.auth.code = qp.code;
-        oauth2.callback({auth: oauth2.auth, redirectUrl: redirectUrl});
-      } else {
-        let oauthErrorMsg;
-        if (qp.error) {
-          oauthErrorMsg = "["+qp.error+"]: " +
-            (qp.error_description ? qp.error_description+ ". " : "no accessCode received from the server. ") +
-            (qp.error_uri ? "More info: "+qp.error_uri : "");
-        }
-
-        oauth2.errCb({
-          authId: oauth2.auth.name,
-          source: "auth",
-          level: "error",
-          message: oauthErrorMsg || "[Authorization failed]: no accessCode received from the server."
-        });
-      }
-    } else {
-      oauth2.callback({auth: oauth2.auth, token: qp, isValid: isValid, redirectUrl: redirectUrl});
-    }
-    window.close();
-  }
-
-  if (document.readyState !== 'loading') {
-    run();
-  } else {
-    document.addEventListener('DOMContentLoaded', function () {
-      run();
-    });
-  }
-</script>
-</body>
-</html>
@@ -1,20 +0,0 @@
-window.onload = function() {
-  //<editor-fold desc="Changeable Configuration Block">
-
-  // the following lines will be replaced by docker/configurator, when it runs in a docker-container
-  window.ui = SwaggerUIBundle({
-    url: "openapi3_0.yaml",
-    dom_id: '#swagger-ui',
-    deepLinking: true,
-    presets: [
-      SwaggerUIBundle.presets.apis,
-      SwaggerUIStandalonePreset
-    ],
-    plugins: [
-      SwaggerUIBundle.plugins.DownloadUrl
-    ],
-    layout: "StandaloneLayout"
-  });
-
-  //</editor-fold>
-};
@@ -11,19 +11,18 @@ if [[ -v "VIRTUAL_ENV" ]]; then
 exit -1
 fi

-VERSION=$(cd ..; python -c "from ldm.invoke import __version__ as version; print(version)")
+VERSION=$(cd ..; python -c "from invokeai.version import __version__ as version; print(version)")
 PATCH=""
 VERSION="v${VERSION}${PATCH}"
-LATEST_TAG="v2.3-latest"
+LATEST_TAG="v3.0-latest"

 echo Building installer for version $VERSION
 echo "Be certain that you're in the 'installer' directory before continuing."
 read -p "Press any key to continue, or CTRL-C to exit..."

-read -e -p "Commit and tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
+read -e -p "Tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
 RESPONSE=${input:='n'}
 if [ "$RESPONSE" == 'y' ]; then
-git commit -a
-
 if ! git tag $VERSION ; then
 echo "Existing/invalid tag"
@@ -32,6 +31,8 @@ if [ "$RESPONSE" == 'y' ]; then

 git push origin :refs/tags/$LATEST_TAG
 git tag -fa $LATEST_TAG
+
+echo "remember to push --tags!"
 fi

 # ----------------------
@@ -67,6 +67,8 @@ del /q .tmp1 .tmp2
 @rem -------------- Install and Configure ---------------

 call python .\lib\main.py
+pause
+exit /b

 @rem ------------------------ Subroutines ---------------
 @rem routine to do comparison of semantic version numbers

@@ -9,13 +9,16 @@ cd $scriptdir
 function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }

 MINIMUM_PYTHON_VERSION=3.9.0
+MAXIMUM_PYTHON_VERSION=3.11.0
 PYTHON=""
-for candidate in python3.10 python3.9 python3 python python3.11 ; do
+for candidate in python3.10 python3.9 python3 python ; do
 if ppath=`which $candidate`; then
 python_version=$($ppath -V | awk '{ print $2 }')
 if [ $(version $python_version) -ge $(version "$MINIMUM_PYTHON_VERSION") ]; then
-PYTHON=$ppath
-break
+if [ $(version $python_version) -lt $(version "$MAXIMUM_PYTHON_VERSION") ]; then
+PYTHON=$ppath
+break
+fi
 fi
 fi
 done
@@ -28,3 +31,4 @@ if [ -z "$PYTHON" ]; then
 fi

 exec $PYTHON ./lib/main.py ${@}
+read -p "Press any key to exit"
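To see what the `version` helper in the hunk above actually computes: each dotted component is zero-padded into a fixed-width integer so that ordinary numeric comparison orders versions correctly. A quick sketch:

```bash
function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }

version 3.9.0    # -> 3009000000
version 3.10.6   # -> 3010006000
version 3.11.0   # -> 3011000000
# hence 3.10.6 passes the -ge minimum check and the -lt maximum check,
# while 3.11.x is rejected by the new upper bound
```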
@@ -291,7 +291,7 @@ class InvokeAiInstance:
             src = Path(__file__).parents[1].expanduser().resolve()
             # if the above directory contains one of these files, we'll do a source install
             next(src.glob("pyproject.toml"))
-            next(src.glob("ldm"))
+            next(src.glob("invokeai"))
         except StopIteration:
             print("Unable to find a wheel or perform a source install. Giving up.")

@@ -336,17 +336,32 @@ class InvokeAiInstance:
             elif el in ['-y','--yes','--yes-to-all']:
                 new_argv.append(el)
         sys.argv = new_argv

+        import requests # to catch download exceptions
         from messages import introduction

         introduction()

-        from ldm.invoke.config import invokeai_configure
+        from invokeai.frontend.install import invokeai_configure

         # NOTE: currently the config script does its own arg parsing! this means the command-line switches
         # from the installer will also automatically propagate down to the config script.
         # this may change in the future with config refactoring!
-        invokeai_configure.main()
+        succeeded = False
+        try:
+            invokeai_configure()
+            succeeded = True
+        except requests.exceptions.ConnectionError as e:
+            print(f'\nA network error was encountered during configuration and download: {str(e)}')
+        except OSError as e:
+            print(f'\nAn OS error was encountered during configuration and download: {str(e)}')
+        except Exception as e:
+            print(f'\nA problem was encountered during the configuration and download steps: {str(e)}')
+        finally:
+            if not succeeded:
+                print('To try again, find the "invokeai" directory, run the script "invoke.sh" or "invoke.bat"')
+                print('and choose option 7 to fix a broken install, optionally followed by option 5 to install models.')
+                print('Alternatively you can relaunch the installer.')

     def install_user_scripts(self):
         """
@@ -441,7 +456,7 @@ def get_torch_source() -> (Union[str, None],str):
     optional_modules = None
     if OS == "Linux":
         if device == "rocm":
-            url = "https://download.pytorch.org/whl/rocm5.2"
+            url = "https://download.pytorch.org/whl/rocm5.4.2"
         elif device == "cpu":
             url = "https://download.pytorch.org/whl/cpu"

@@ -6,15 +6,20 @@ setlocal
 call .venv\Scripts\activate.bat
 set INVOKEAI_ROOT=.

+:start
 echo Do you want to generate images using the
-echo 1. command-line
+echo 1. command-line interface
 echo 2. browser-based UI
 echo 3. run textual inversion training
 echo 4. merge models (diffusers type only)
-echo 5. re-run the configure script to download new models
-echo 6. open the developer console
-echo 7. command-line help
-set /P restore="Please enter 1, 2, 3, 4, 5, 6 or 7: [2] "
+echo 5. download and install models
+echo 6. change InvokeAI startup options
+echo 7. re-run the configure script to fix a broken install
+echo 8. open the developer console
+echo 9. update InvokeAI
+echo 10. command-line help
+echo Q - quit
+set /P restore="Please enter 1-10, Q: [2] "
 if not defined restore set restore=2
 IF /I "%restore%" == "1" (
 echo Starting the InvokeAI command-line..
@@ -24,14 +29,20 @@ IF /I "%restore%" == "1" (
 python .venv\Scripts\invokeai.exe --web %*
 ) ELSE IF /I "%restore%" == "3" (
 echo Starting textual inversion training..
-python .venv\Scripts\invokeai-ti.exe --gui %*
+python .venv\Scripts\invokeai-ti.exe --gui
 ) ELSE IF /I "%restore%" == "4" (
 echo Starting model merging script..
-python .venv\Scripts\invokeai-merge.exe --gui %*
+python .venv\Scripts\invokeai-merge.exe --gui
 ) ELSE IF /I "%restore%" == "5" (
-echo Running invokeai-configure...
-python .venv\Scripts\invokeai-configure.exe %*
+echo Running invokeai-model-install...
+python .venv\Scripts\invokeai-model-install.exe
 ) ELSE IF /I "%restore%" == "6" (
+echo Running invokeai-configure...
+python .venv\Scripts\invokeai-configure.exe --skip-sd-weight --skip-support-models
+) ELSE IF /I "%restore%" == "7" (
+echo Running invokeai-configure...
+python .venv\Scripts\invokeai-configure.exe --yes --default_only
+) ELSE IF /I "%restore%" == "8" (
 echo Developer Console
 echo Python command is:
 where python
@@ -43,14 +54,27 @@ IF /I "%restore%" == "1" (
 echo *************************
 echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
 call cmd /k
-) ELSE IF /I "%restore%" == "7" (
+) ELSE IF /I "%restore%" == "9" (
+echo Running invokeai-update...
+python .venv\Scripts\invokeai-update.exe %*
+) ELSE IF /I "%restore%" == "10" (
 echo Displaying command line help...
 python .venv\Scripts\invokeai.exe --help %*
 pause
 exit /b
+) ELSE IF /I "%restore%" == "q" (
+echo Goodbye!
+goto ending
 ) ELSE (
 echo Invalid selection
 pause
 exit /b
 )
+goto start

 endlocal
+pause
+
+:ending
+exit /b

@@ -25,49 +25,69 @@ if [ "$(uname -s)" == "Darwin" ]; then
 fi

 if [ "$0" != "bash" ]; then
+while true
+do
 echo "Do you want to generate images using the"
-echo "1. command-line"
+echo "1. command-line interface"
 echo "2. browser-based UI"
 echo "3. run textual inversion training"
 echo "4. merge models (diffusers type only)"
-echo "5. open the developer console"
-echo "6. re-run the configure script to download new models"
-echo "7. command-line help "
+echo "5. download and install models"
+echo "6. change InvokeAI startup options"
+echo "7. re-run the configure script to fix a broken install"
+echo "8. open the developer console"
+echo "9. update InvokeAI"
+echo "10. command-line help"
+echo "Q - Quit"
 echo ""
-read -p "Please enter 1, 2, 3, 4, 5, 6 or 7: [2] " yn
+read -p "Please enter 1-10, Q: [2] " yn
 choice=${yn:='2'}
 case $choice in
 1)
 echo "Starting the InvokeAI command-line..."
-exec invokeai $@
+invokeai $@
 ;;
 2)
 echo "Starting the InvokeAI browser-based UI..."
-exec invokeai --web $@
+invokeai --web $@
 ;;
 3)
 echo "Starting Textual Inversion:"
-exec invokeai-ti --gui $@
+invokeai-ti --gui $@
 ;;
 4)
 echo "Merging Models:"
-exec invokeai-merge --gui $@
+invokeai-merge --gui $@
 ;;
 5)
+invokeai-model-install --root ${INVOKEAI_ROOT}
+;;
+6)
+invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
+;;
+7)
+invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
+;;
+8)
 echo "Developer Console:"
 file_name=$(basename "${BASH_SOURCE[0]}")
 bash --init-file "$file_name"
 ;;
-6)
-exec invokeai-configure --root ${INVOKEAI_ROOT}
+9)
+echo "Update:"
+invokeai-update
 ;;
-7)
-exec invokeai --help
+10)
+invokeai --help
+;;
+[qQ])
+exit 0
 ;;
 *)
 echo "Invalid selection"
 exit;;
 esac
+done
 else # in developer console
 python --version
 echo "Press ^D to exit"
@@ -1,3 +1,11 @@
-After version 2.3 is released, the ldm/invoke modules will be migrated to this location
-so that we have a proper invokeai distribution. Currently it is only being used for
-data files.
+Organization of the source tree:
+
+app      -- Home of nodes invocations and services
+assets   -- Images and other data files used by InvokeAI
+backend  -- Non-user facing libraries, including the rendering
+            core.
+configs  -- Configuration files used at install and run times
+frontend -- User-facing scripts, including the CLI and the WebUI
+version  -- Current InvokeAI version string, stored
+            in version/invokeai_version.py

invokeai/app/api/dependencies.py (new file)
@@ -0,0 +1,96 @@
|
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
|
||||||
|
|
||||||
|
import os
|
||||||
|
from argparse import Namespace
|
||||||
|
|
||||||
|
from invokeai.app.services.metadata import PngMetadataService, MetadataServiceBase
|
||||||
|
|
||||||
|
from ..services.default_graphs import create_system_graphs
|
||||||
|
|
||||||
|
from ..services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
|
||||||
|
|
||||||
|
from ...backend import Globals
|
||||||
|
from ..services.model_manager_initializer import get_model_manager
|
||||||
|
from ..services.restoration_services import RestorationServices
|
||||||
|
from ..services.graph import GraphExecutionState, LibraryGraph
|
||||||
|
from ..services.image_storage import DiskImageStorage
|
||||||
|
from ..services.invocation_queue import MemoryInvocationQueue
|
||||||
|
from ..services.invocation_services import InvocationServices
|
||||||
|
from ..services.invoker import Invoker
|
||||||
|
from ..services.processor import DefaultInvocationProcessor
|
||||||
|
from ..services.sqlite import SqliteItemStorage
|
||||||
|
from .events import FastAPIEventService
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: is there a better way to achieve this?
|
||||||
|
def check_internet() -> bool:
|
||||||
|
"""
|
||||||
|
Return true if the internet is reachable.
|
||||||
|
It does this by pinging huggingface.co.
|
||||||
|
"""
|
||||||
|
import urllib.request
|
||||||
|
|
||||||
|
host = "http://huggingface.co"
|
||||||
|
try:
|
||||||
|
urllib.request.urlopen(host, timeout=1)
|
||||||
|
return True
|
||||||
|
except:
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
class ApiDependencies:
|
||||||
|
"""Contains and initializes all dependencies for the API"""
|
||||||
|
|
||||||
|
invoker: Invoker = None
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def initialize(config, event_handler_id: int):
|
||||||
|
Globals.try_patchmatch = config.patchmatch
|
||||||
|
Globals.always_use_cpu = config.always_use_cpu
|
||||||
|
Globals.internet_available = config.internet_available and check_internet()
|
||||||
|
Globals.disable_xformers = not config.xformers
|
||||||
|
Globals.ckpt_convert = config.ckpt_convert
|
||||||
|
|
||||||
|
# TODO: Use a logger
|
||||||
|
print(f">> Internet connectivity is {Globals.internet_available}")
|
||||||
|
|
||||||
|
events = FastAPIEventService(event_handler_id)
|
||||||
|
|
||||||
|
output_folder = os.path.abspath(
|
||||||
|
os.path.join(os.path.dirname(__file__), "../../../../outputs")
|
||||||
|
)
|
||||||
|
|
||||||
|
latents = ForwardCacheLatentsStorage(DiskLatentsStorage(f'{output_folder}/latents'))
|
||||||
|
|
||||||
|
metadata = PngMetadataService()
|
||||||
|
|
||||||
|
images = DiskImageStorage(f'{output_folder}/images', metadata_service=metadata)
|
||||||
|
|
||||||
|
# TODO: build a file/path manager?
|
||||||
|
db_location = os.path.join(output_folder, "invokeai.db")
|
||||||
|
|
||||||
|
services = InvocationServices(
|
||||||
|
model_manager=get_model_manager(config),
|
||||||
|
events=events,
|
||||||
|
latents=latents,
|
||||||
|
images=images,
|
||||||
|
metadata=metadata,
|
||||||
|
queue=MemoryInvocationQueue(),
|
||||||
|
graph_library=SqliteItemStorage[LibraryGraph](
|
||||||
|
filename=db_location, table_name="graphs"
|
||||||
|
),
|
||||||
|
graph_execution_manager=SqliteItemStorage[GraphExecutionState](
|
||||||
|
filename=db_location, table_name="graph_executions"
|
||||||
|
),
|
||||||
|
processor=DefaultInvocationProcessor(),
|
||||||
|
restoration=RestorationServices(config),
|
||||||
|
)
|
||||||
|
|
||||||
|
create_system_graphs(services.graph_library)
|
||||||
|
|
||||||
|
ApiDependencies.invoker = Invoker(services)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def shutdown():
|
||||||
|
if ApiDependencies.invoker:
|
||||||
|
ApiDependencies.invoker.stop()
|
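Note that the routers further down reach these services through the ApiDependencies.invoker class attribute rather than through FastAPI's own dependency injection. A minimal sketch of that access pattern, assuming initialize() has already run at startup; the router and endpoint here are hypothetical, while get_path()/validate_path() mirror the real images router below:

from fastapi.routing import APIRouter

from invokeai.app.api.dependencies import ApiDependencies
from invokeai.app.models.image import ImageType

sketch_router = APIRouter(prefix="/v1/sketch", tags=["sketch"])  # hypothetical router


@sketch_router.get("/has-image/{name}")
async def has_image(name: str) -> bool:
    # Any service wired up in initialize() is reachable via the invoker.
    path = ApiDependencies.invoker.services.images.get_path(
        image_type=ImageType.RESULT, image_name=name
    )
    return ApiDependencies.invoker.services.images.validate_path(path)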
invokeai/app/api/events.py (new file, 52 lines)
@@ -0,0 +1,52 @@

# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

import asyncio
import threading
from queue import Empty, Queue
from typing import Any

from fastapi_events.dispatcher import dispatch

from ..services.events import EventServiceBase


class FastAPIEventService(EventServiceBase):
    event_handler_id: int
    __queue: Queue
    __stop_event: threading.Event

    def __init__(self, event_handler_id: int) -> None:
        self.event_handler_id = event_handler_id
        self.__queue = Queue()
        self.__stop_event = threading.Event()
        asyncio.create_task(self.__dispatch_from_queue(stop_event=self.__stop_event))

        super().__init__()

    def stop(self, *args, **kwargs):
        self.__stop_event.set()
        self.__queue.put(None)

    def dispatch(self, event_name: str, payload: Any) -> None:
        self.__queue.put(dict(event_name=event_name, payload=payload))

    async def __dispatch_from_queue(self, stop_event: threading.Event):
        """Get events from the queue and dispatch them, from the correct thread"""
        while not stop_event.is_set():
            try:
                event = self.__queue.get(block=False)
                if not event:  # Probably stopping
                    continue

                dispatch(
                    event.get("event_name"),
                    payload=event.get("payload"),
                    middleware_id=self.event_handler_id,
                )

            except Empty:
                await asyncio.sleep(0.001)
                pass

            except asyncio.CancelledError as e:
                raise e  # Raise a proper error
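The Queue/polling split exists because dispatch() may be called from the invocation processor's worker thread, while fastapi-events must emit on the asyncio event loop; the queue is the bridge between the two. A sketch of the calling convention, assuming an app has registered EventHandlerASGIMiddleware under the same event_handler_id (as api_app.py does below); outside such an app the forwarded dispatch has nowhere to go:

import asyncio

from invokeai.app.api.events import FastAPIEventService


async def main() -> None:
    # The constructor must run inside a running loop: it calls create_task().
    service = FastAPIEventService(event_handler_id=1234)  # id value is illustrative
    # Thread-safe from any worker: dispatch() only touches the Queue.
    service.dispatch("generator_progress", {"step": 1, "total_steps": 10})
    await asyncio.sleep(0.01)  # let __dispatch_from_queue drain once
    service.stop()


asyncio.run(main())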
invokeai/app/api/models/images.py (new file, 34 lines)
@@ -0,0 +1,34 @@

from typing import Optional
from pydantic import BaseModel, Field

from invokeai.app.models.image import ImageType
from invokeai.app.services.metadata import InvokeAIMetadata


class ImageResponseMetadata(BaseModel):
    """An image's metadata. Used only in HTTP responses."""

    created: int = Field(description="The creation timestamp of the image")
    width: int = Field(description="The width of the image in pixels")
    height: int = Field(description="The height of the image in pixels")
    invokeai: Optional[InvokeAIMetadata] = Field(
        description="The image's InvokeAI-specific metadata"
    )


class ImageResponse(BaseModel):
    """The response type for images"""

    image_type: ImageType = Field(description="The type of the image")
    image_name: str = Field(description="The name of the image")
    image_url: str = Field(description="The url of the image")
    thumbnail_url: str = Field(description="The url of the image's thumbnail")
    metadata: ImageResponseMetadata = Field(description="The image's metadata")


class ProgressImage(BaseModel):
    """The progress image sent intermittently during processing"""

    width: int = Field(description="The effective width of the image in pixels")
    height: int = Field(description="The effective height of the image in pixels")
    dataURL: str = Field(description="The image data as a b64 data URL")
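For reference, a sketch of what one of these response models serializes to; every field value below is made up, and the URL shapes are only illustrative of what the routers below construct:

from invokeai.app.api.models.images import ImageResponse, ImageResponseMetadata
from invokeai.app.models.image import ImageType

resp = ImageResponse(
    image_type=ImageType.RESULT,
    image_name="example.png",  # made-up name
    image_url="api/v1/images/results/example.png",
    thumbnail_url="api/v1/images/results/thumbnails/example.webp",
    metadata=ImageResponseMetadata(
        created=1679000000, width=512, height=512, invokeai=None
    ),
)
print(resp.json())  # roughly the JSON shape the routers below return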
invokeai/app/api/routers/images.py (new file, 128 lines)
@@ -0,0 +1,128 @@

# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
import io
from datetime import datetime, timezone
import json
import os
from typing import Any
import uuid

from fastapi import HTTPException, Path, Query, Request, UploadFile
from fastapi.responses import FileResponse, Response
from fastapi.routing import APIRouter
from PIL import Image
from invokeai.app.api.models.images import ImageResponse, ImageResponseMetadata
from invokeai.app.services.metadata import InvokeAIMetadata
from invokeai.app.services.item_storage import PaginatedResults

from ...services.image_storage import ImageType
from ..dependencies import ApiDependencies

images_router = APIRouter(prefix="/v1/images", tags=["images"])


@images_router.get("/{image_type}/{image_name}", operation_id="get_image")
async def get_image(
    image_type: ImageType = Path(description="The type of image to get"),
    image_name: str = Path(description="The name of the image to get"),
) -> FileResponse | Response:
    """Gets a result"""

    path = ApiDependencies.invoker.services.images.get_path(
        image_type=image_type, image_name=image_name
    )

    if ApiDependencies.invoker.services.images.validate_path(path):
        return FileResponse(path)
    else:
        raise HTTPException(status_code=404)


@images_router.get(
    "/{image_type}/thumbnails/{image_name}", operation_id="get_thumbnail"
)
async def get_thumbnail(
    image_type: ImageType = Path(description="The type of image to get"),
    image_name: str = Path(description="The name of the image to get"),
) -> FileResponse | Response:
    """Gets a thumbnail"""

    path = ApiDependencies.invoker.services.images.get_path(
        image_type=image_type, image_name=image_name, is_thumbnail=True
    )

    if ApiDependencies.invoker.services.images.validate_path(path):
        return FileResponse(path)
    else:
        raise HTTPException(status_code=404)


@images_router.post(
    "/uploads/",
    operation_id="upload_image",
    responses={
        201: {
            "description": "The image was uploaded successfully",
            "model": ImageResponse,
        },
        415: {"description": "Image upload failed"},
    },
    status_code=201,
)
async def upload_image(
    file: UploadFile, request: Request, response: Response
) -> ImageResponse:
    if not file.content_type.startswith("image"):
        raise HTTPException(status_code=415, detail="Not an image")

    contents = await file.read()

    try:
        img = Image.open(io.BytesIO(contents))
    except:
        # Error opening the image
        raise HTTPException(status_code=415, detail="Failed to read image")

    filename = f"{uuid.uuid4()}_{str(int(datetime.now(timezone.utc).timestamp()))}.png"

    (image_path, thumbnail_path, ctime) = ApiDependencies.invoker.services.images.save(
        ImageType.UPLOAD, filename, img
    )

    invokeai_metadata = ApiDependencies.invoker.services.metadata.get_metadata(img)

    res = ImageResponse(
        image_type=ImageType.UPLOAD,
        image_name=filename,
        image_url=f"api/v1/images/{ImageType.UPLOAD.value}/{filename}",
        thumbnail_url=f"api/v1/images/{ImageType.UPLOAD.value}/thumbnails/{os.path.splitext(filename)[0]}.webp",
        metadata=ImageResponseMetadata(
            created=ctime,
            width=img.width,
            height=img.height,
            invokeai=invokeai_metadata,
        ),
    )

    response.status_code = 201
    response.headers["Location"] = request.url_for(
        "get_image", image_type=ImageType.UPLOAD.value, image_name=filename
    )

    return res


@images_router.get(
    "/",
    operation_id="list_images",
    responses={200: {"model": PaginatedResults[ImageResponse]}},
)
async def list_images(
    image_type: ImageType = Query(
        default=ImageType.RESULT, description="The type of images to get"
    ),
    page: int = Query(default=0, description="The page of images to get"),
    per_page: int = Query(default=10, description="The number of images per page"),
) -> PaginatedResults[ImageResponse]:
    """Gets a list of images"""
    result = ApiDependencies.invoker.services.images.list(image_type, page, per_page)
    return result
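A client-side sketch of the upload route. It assumes the API server from api_app.py is listening on its default localhost:9090; the requests package and the file name are illustrative, not project dependencies:

import requests

with open("photo.png", "rb") as f:
    r = requests.post(
        "http://localhost:9090/api/v1/images/uploads/",
        files={"file": ("photo.png", f, "image/png")},
    )

r.raise_for_status()  # 201 on success, 415 if the payload was not an image
print(r.headers.get("Location"))  # the canonical get_image URL for the upload
print(r.json()["thumbnail_url"])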
invokeai/app/api/routers/models.py (new file, 251 lines)
@@ -0,0 +1,251 @@

# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) and 2023 Kent Keirsey (https://github.com/hipsterusername)

import shutil
import asyncio
from typing import Annotated, Any, List, Literal, Optional, Union

from fastapi import HTTPException
from fastapi.routing import APIRouter
from pydantic import BaseModel, Field, parse_obj_as
from pathlib import Path
from ..dependencies import ApiDependencies
from invokeai.backend.globals import Globals, global_converted_ckpts_dir
from invokeai.backend.args import Args


models_router = APIRouter(prefix="/v1/models", tags=["models"])


class VaeRepo(BaseModel):
    repo_id: str = Field(description="The repo ID to use for this VAE")
    path: Optional[str] = Field(description="The path to the VAE")
    subfolder: Optional[str] = Field(description="The subfolder to use for this VAE")


class ModelInfo(BaseModel):
    description: Optional[str] = Field(description="A description of the model")


class CkptModelInfo(ModelInfo):
    format: Literal['ckpt'] = 'ckpt'

    config: str = Field(description="The path to the model config")
    weights: str = Field(description="The path to the model weights")
    vae: str = Field(description="The path to the model VAE")
    width: Optional[int] = Field(description="The width of the model")
    height: Optional[int] = Field(description="The height of the model")


class DiffusersModelInfo(ModelInfo):
    format: Literal['diffusers'] = 'diffusers'

    vae: Optional[VaeRepo] = Field(description="The VAE repo to use for this model")
    repo_id: Optional[str] = Field(description="The repo ID to use for this model")
    path: Optional[str] = Field(description="The path to the model")


class CreateModelRequest(BaseModel):
    name: str = Field(description="The name of the model")
    info: Union[CkptModelInfo, DiffusersModelInfo] = Field(discriminator="format", description="The model info")


class CreateModelResponse(BaseModel):
    name: str = Field(description="The name of the new model")
    info: Union[CkptModelInfo, DiffusersModelInfo] = Field(discriminator="format", description="The model info")
    status: str = Field(description="The status of the API response")


class ConversionRequest(BaseModel):
    name: str = Field(description="The name of the new model")
    info: CkptModelInfo = Field(description="The converted model info")
    save_location: str = Field(description="The path to save the converted model weights")


class ConvertedModelResponse(BaseModel):
    name: str = Field(description="The name of the new model")
    info: DiffusersModelInfo = Field(description="The converted model info")


class ModelsList(BaseModel):
    models: dict[str, Annotated[Union[(CkptModelInfo,DiffusersModelInfo)], Field(discriminator="format")]]


@models_router.get(
    "/",
    operation_id="list_models",
    responses={200: {"model": ModelsList }},
)
async def list_models() -> ModelsList:
    """Gets a list of models"""
    models_raw = ApiDependencies.invoker.services.model_manager.list_models()
    models = parse_obj_as(ModelsList, { "models": models_raw })
    return models


@models_router.post(
    "/",
    operation_id="update_model",
    responses={200: {"status": "success"}},
)
async def update_model(
    model_request: CreateModelRequest
) -> CreateModelResponse:
    """ Add Model """
    model_request_info = model_request.info
    info_dict = model_request_info.dict()
    model_response = CreateModelResponse(name=model_request.name, info=model_request.info, status="success")

    ApiDependencies.invoker.services.model_manager.add_model(
        model_name=model_request.name,
        model_attributes=info_dict,
        clobber=True,
    )

    return model_response


@models_router.delete(
    "/{model_name}",
    operation_id="del_model",
    responses={
        204: {
            "description": "Model deleted successfully"
        },
        404: {
            "description": "Model not found"
        }
    },
)
async def delete_model(model_name: str) -> None:
    """Delete Model"""
    model_names = ApiDependencies.invoker.services.model_manager.model_names()
    model_exists = model_name in model_names

    # check if model exists
    print(f">> Checking for model {model_name}...")

    if model_exists:
        print(f">> Deleting Model: {model_name}")
        ApiDependencies.invoker.services.model_manager.del_model(model_name, delete_files=True)
        print(f">> Model Deleted: {model_name}")
        raise HTTPException(status_code=204, detail=f"Model '{model_name}' deleted successfully")

    else:
        print(f">> Model not found")
        raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found")


# @socketio.on("convertToDiffusers")
# def convert_to_diffusers(model_to_convert: dict):
#     try:
#         if model_info := self.generate.model_manager.model_info(
#             model_name=model_to_convert["model_name"]
#         ):
#             if "weights" in model_info:
#                 ckpt_path = Path(model_info["weights"])
#                 original_config_file = Path(model_info["config"])
#                 model_name = model_to_convert["model_name"]
#                 model_description = model_info["description"]
#             else:
#                 self.socketio.emit(
#                     "error", {"message": "Model is not a valid checkpoint file"}
#                 )
#         else:
#             self.socketio.emit(
#                 "error", {"message": "Could not retrieve model info."}
#             )
#
#         if not ckpt_path.is_absolute():
#             ckpt_path = Path(Globals.root, ckpt_path)
#
#         if original_config_file and not original_config_file.is_absolute():
#             original_config_file = Path(Globals.root, original_config_file)
#
#         diffusers_path = Path(
#             ckpt_path.parent.absolute(), f"{model_name}_diffusers"
#         )
#
#         if model_to_convert["save_location"] == "root":
#             diffusers_path = Path(
#                 global_converted_ckpts_dir(), f"{model_name}_diffusers"
#             )
#
#         if (
#             model_to_convert["save_location"] == "custom"
#             and model_to_convert["custom_location"] is not None
#         ):
#             diffusers_path = Path(
#                 model_to_convert["custom_location"], f"{model_name}_diffusers"
#             )
#
#         if diffusers_path.exists():
#             shutil.rmtree(diffusers_path)
#
#         self.generate.model_manager.convert_and_import(
#             ckpt_path,
#             diffusers_path,
#             model_name=model_name,
#             model_description=model_description,
#             vae=None,
#             original_config_file=original_config_file,
#             commit_to_conf=opt.conf,
#         )
#
#         new_model_list = self.generate.model_manager.list_models()
#         socketio.emit(
#             "modelConverted",
#             {
#                 "new_model_name": model_name,
#                 "model_list": new_model_list,
#                 "update": True,
#             },
#         )
#         print(f">> Model Converted: {model_name}")
#     except Exception as e:
#         self.handle_exceptions(e)

# @socketio.on("mergeDiffusersModels")
# def merge_diffusers_models(model_merge_info: dict):
#     try:
#         models_to_merge = model_merge_info["models_to_merge"]
#         model_ids_or_paths = [
#             self.generate.model_manager.model_name_or_path(x)
#             for x in models_to_merge
#         ]
#         merged_pipe = merge_diffusion_models(
#             model_ids_or_paths,
#             model_merge_info["alpha"],
#             model_merge_info["interp"],
#             model_merge_info["force"],
#         )
#
#         dump_path = global_models_dir() / "merged_models"
#         if model_merge_info["model_merge_save_path"] is not None:
#             dump_path = Path(model_merge_info["model_merge_save_path"])
#
#         os.makedirs(dump_path, exist_ok=True)
#         dump_path = dump_path / model_merge_info["merged_model_name"]
#         merged_pipe.save_pretrained(dump_path, safe_serialization=1)
#
#         merged_model_config = dict(
#             model_name=model_merge_info["merged_model_name"],
#             description=f'Merge of models {", ".join(models_to_merge)}',
#             commit_to_conf=opt.conf,
#         )
#
#         if vae := self.generate.model_manager.config[models_to_merge[0]].get(
#             "vae", None
#         ):
#             print(f">> Using configured VAE assigned to {models_to_merge[0]}")
#             merged_model_config.update(vae=vae)
#
#         self.generate.model_manager.import_diffuser_model(
#             dump_path, **merged_model_config
#         )
#         new_model_list = self.generate.model_manager.list_models()
#
#         socketio.emit(
#             "modelsMerged",
#             {
#                 "merged_models": models_to_merge,
#                 "merged_model_name": model_merge_info["merged_model_name"],
#                 "model_list": new_model_list,
#                 "update": True,
#             },
#         )
#         print(f">> Models Merged: {models_to_merge}")
#         print(f">> New Model Added: {model_merge_info['merged_model_name']}")
#     except Exception as e:
#         self.handle_exceptions(e)
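A client-side sketch of the add/update route; the "format" key is the pydantic discriminator that selects between CkptModelInfo and DiffusersModelInfo. The host and port follow api_app.py's defaults, while the requests package and the model name/repo values are purely illustrative:

import requests

payload = {
    "name": "my-model",  # hypothetical model name
    "info": {
        "format": "diffusers",  # discriminator: "ckpt" or "diffusers"
        "repo_id": "stabilityai/stable-diffusion-2-1",  # illustrative repo id
        "description": "SD 2.1 pulled from the HF hub",
    },
}
r = requests.post("http://localhost:9090/api/v1/models/", json=payload)
print(r.json()["status"])  # "success" on the happy path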
invokeai/app/api/routers/sessions.py (new file, 287 lines)
@@ -0,0 +1,287 @@

# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from typing import Annotated, List, Optional, Union

from fastapi import Body, Path, Query
from fastapi.responses import Response
from fastapi.routing import APIRouter
from pydantic.fields import Field

from ...invocations import *
from ...invocations.baseinvocation import BaseInvocation
from ...services.graph import (
    Edge,
    EdgeConnection,
    Graph,
    GraphExecutionState,
    NodeAlreadyExecutedError,
)
from ...services.item_storage import PaginatedResults
from ..dependencies import ApiDependencies

session_router = APIRouter(prefix="/v1/sessions", tags=["sessions"])


@session_router.post(
    "/",
    operation_id="create_session",
    responses={
        200: {"model": GraphExecutionState},
        400: {"description": "Invalid json"},
    },
)
async def create_session(
    graph: Optional[Graph] = Body(
        default=None, description="The graph to initialize the session with"
    )
) -> GraphExecutionState:
    """Creates a new session, optionally initializing it with an invocation graph"""
    session = ApiDependencies.invoker.create_execution_state(graph)
    return session


@session_router.get(
    "/",
    operation_id="list_sessions",
    responses={200: {"model": PaginatedResults[GraphExecutionState]}},
)
async def list_sessions(
    page: int = Query(default=0, description="The page of results to get"),
    per_page: int = Query(default=10, description="The number of results per page"),
    query: str = Query(default="", description="The query string to search for"),
) -> PaginatedResults[GraphExecutionState]:
    """Gets a list of sessions, optionally searching"""
    if query == "":
        result = ApiDependencies.invoker.services.graph_execution_manager.list(
            page, per_page
        )
    else:
        result = ApiDependencies.invoker.services.graph_execution_manager.search(
            query, page, per_page
        )
    return result


@session_router.get(
    "/{session_id}",
    operation_id="get_session",
    responses={
        200: {"model": GraphExecutionState},
        404: {"description": "Session not found"},
    },
)
async def get_session(
    session_id: str = Path(description="The id of the session to get"),
) -> GraphExecutionState:
    """Gets a session"""
    session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
    if session is None:
        return Response(status_code=404)
    else:
        return session


@session_router.post(
    "/{session_id}/nodes",
    operation_id="add_node",
    responses={
        200: {"model": str},
        400: {"description": "Invalid node or link"},
        404: {"description": "Session not found"},
    },
)
async def add_node(
    session_id: str = Path(description="The id of the session"),
    node: Annotated[
        Union[BaseInvocation.get_invocations()], Field(discriminator="type")  # type: ignore
    ] = Body(description="The node to add"),
) -> str:
    """Adds a node to the graph"""
    session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
    if session is None:
        return Response(status_code=404)

    try:
        session.add_node(node)
        ApiDependencies.invoker.services.graph_execution_manager.set(
            session
        )  # TODO: can this be done automatically, or add node through an API?
        return session.id
    except NodeAlreadyExecutedError:
        return Response(status_code=400)
    except IndexError:
        return Response(status_code=400)


@session_router.put(
    "/{session_id}/nodes/{node_path}",
    operation_id="update_node",
    responses={
        200: {"model": GraphExecutionState},
        400: {"description": "Invalid node or link"},
        404: {"description": "Session not found"},
    },
)
async def update_node(
    session_id: str = Path(description="The id of the session"),
    node_path: str = Path(description="The path to the node in the graph"),
    node: Annotated[
        Union[BaseInvocation.get_invocations()], Field(discriminator="type")  # type: ignore
    ] = Body(description="The new node"),
) -> GraphExecutionState:
    """Updates a node in the graph and removes all linked edges"""
    session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
    if session is None:
        return Response(status_code=404)

    try:
        session.update_node(node_path, node)
        ApiDependencies.invoker.services.graph_execution_manager.set(
            session
        )  # TODO: can this be done automatically, or add node through an API?
        return session
    except NodeAlreadyExecutedError:
        return Response(status_code=400)
    except IndexError:
        return Response(status_code=400)


@session_router.delete(
    "/{session_id}/nodes/{node_path}",
    operation_id="delete_node",
    responses={
        200: {"model": GraphExecutionState},
        400: {"description": "Invalid node or link"},
        404: {"description": "Session not found"},
    },
)
async def delete_node(
    session_id: str = Path(description="The id of the session"),
    node_path: str = Path(description="The path to the node to delete"),
) -> GraphExecutionState:
    """Deletes a node in the graph and removes all linked edges"""
    session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
    if session is None:
        return Response(status_code=404)

    try:
        session.delete_node(node_path)
        ApiDependencies.invoker.services.graph_execution_manager.set(
            session
        )  # TODO: can this be done automatically, or add node through an API?
        return session
    except NodeAlreadyExecutedError:
        return Response(status_code=400)
    except IndexError:
        return Response(status_code=400)


@session_router.post(
    "/{session_id}/edges",
    operation_id="add_edge",
    responses={
        200: {"model": GraphExecutionState},
        400: {"description": "Invalid node or link"},
        404: {"description": "Session not found"},
    },
)
async def add_edge(
    session_id: str = Path(description="The id of the session"),
    edge: Edge = Body(description="The edge to add"),
) -> GraphExecutionState:
    """Adds an edge to the graph"""
    session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
    if session is None:
        return Response(status_code=404)

    try:
        session.add_edge(edge)
        ApiDependencies.invoker.services.graph_execution_manager.set(
            session
        )  # TODO: can this be done automatically, or add node through an API?
        return session
    except NodeAlreadyExecutedError:
        return Response(status_code=400)
    except IndexError:
        return Response(status_code=400)


# TODO: the edge being in the path here is really ugly, find a better solution
@session_router.delete(
    "/{session_id}/edges/{from_node_id}/{from_field}/{to_node_id}/{to_field}",
    operation_id="delete_edge",
    responses={
        200: {"model": GraphExecutionState},
        400: {"description": "Invalid node or link"},
        404: {"description": "Session not found"},
    },
)
async def delete_edge(
    session_id: str = Path(description="The id of the session"),
    from_node_id: str = Path(description="The id of the node the edge is coming from"),
    from_field: str = Path(description="The field of the node the edge is coming from"),
    to_node_id: str = Path(description="The id of the node the edge is going to"),
    to_field: str = Path(description="The field of the node the edge is going to"),
) -> GraphExecutionState:
    """Deletes an edge from the graph"""
    session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
    if session is None:
        return Response(status_code=404)

    try:
        edge = Edge(
            source=EdgeConnection(node_id=from_node_id, field=from_field),
            destination=EdgeConnection(node_id=to_node_id, field=to_field)
        )
        session.delete_edge(edge)
        ApiDependencies.invoker.services.graph_execution_manager.set(
            session
        )  # TODO: can this be done automatically, or add node through an API?
        return session
    except NodeAlreadyExecutedError:
        return Response(status_code=400)
    except IndexError:
        return Response(status_code=400)


@session_router.put(
    "/{session_id}/invoke",
    operation_id="invoke_session",
    responses={
        200: {"model": None},
        202: {"description": "The invocation is queued"},
        400: {"description": "The session has no invocations ready to invoke"},
        404: {"description": "Session not found"},
    },
)
async def invoke_session(
    session_id: str = Path(description="The id of the session to invoke"),
    all: bool = Query(
        default=False, description="Whether or not to invoke all remaining invocations"
    ),
) -> None:
    """Invokes a session"""
    session = ApiDependencies.invoker.services.graph_execution_manager.get(session_id)
    if session is None:
        return Response(status_code=404)

    if session.is_complete():
        return Response(status_code=400)

    ApiDependencies.invoker.invoke(session, invoke_all=all)
    return Response(status_code=202)


@session_router.delete(
    "/{session_id}/invoke",
    operation_id="cancel_session_invoke",
    responses={
        202: {"description": "The invocation is canceled"}
    },
)
async def cancel_session_invoke(
    session_id: str = Path(description="The id of the session to cancel"),
) -> None:
    """Cancels a session"""
    ApiDependencies.invoker.cancel(session_id)
    return Response(status_code=202)
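A typical client workflow against this router is create session, add nodes and edges, then PUT invoke. A sketch of that lifecycle; the node "type" string and its fields are hypothetical, since valid types are whatever invocations the server has registered:

import requests

BASE = "http://localhost:9090/api/v1/sessions"  # api_app.py defaults

# 1. Create an empty session (the graph body is optional).
session = requests.post(f"{BASE}/").json()
session_id = session["id"]

# 2. Add a node; "txt2img" and its fields are illustrative only.
requests.post(
    f"{BASE}/{session_id}/nodes",
    json={"id": "1", "type": "txt2img", "prompt": "a fennec fox"},
)

# 3. Queue every ready invocation; the server answers 202 and streams
#    progress over the socket.io events (see sockets.py below).
requests.put(f"{BASE}/{session_id}/invoke", params={"all": True})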
invokeai/app/api/sockets.py (new file, 38 lines)
@@ -0,0 +1,38 @@

# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from fastapi import FastAPI
from fastapi_events.handlers.local import local_handler
from fastapi_events.typing import Event
from fastapi_socketio import SocketManager

from ..services.events import EventServiceBase


class SocketIO:
    __sio: SocketManager

    def __init__(self, app: FastAPI):
        self.__sio = SocketManager(app=app)
        self.__sio.on("subscribe", handler=self._handle_sub)
        self.__sio.on("unsubscribe", handler=self._handle_unsub)

        local_handler.register(
            event_name=EventServiceBase.session_event, _func=self._handle_session_event
        )

    async def _handle_session_event(self, event: Event):
        await self.__sio.emit(
            event=event[1]["event"],
            data=event[1]["data"],
            room=event[1]["data"]["graph_execution_state_id"],
        )

    async def _handle_sub(self, sid, data, *args, **kwargs):
        if "session" in data:
            self.__sio.enter_room(sid, data["session"])

    # @app.sio.on('unsubscribe')

    async def _handle_unsub(self, sid, data, *args, **kwargs):
        if "session" in data:
            self.__sio.leave_room(sid, data["session"])
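Events are emitted into rooms keyed by graph_execution_state_id, so a client joins the room for its session by emitting "subscribe" with the session id. A sketch using the python-socketio package, which is an assumption here and not a project dependency; the socket path is fastapi_socketio's default and is also assumed:

import socketio

sio = socketio.Client()


@sio.on("*")
def on_any_event(event, data):
    # Catch-all: prints every session event forwarded by _handle_session_event.
    print(event, data)


sio.connect("http://localhost:9090", socketio_path="ws/socket.io")  # path assumed
sio.emit("subscribe", {"session": "some-session-id"})  # matches _handle_sub above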
invokeai/app/api_app.py (new file, 160 lines)
@@ -0,0 +1,160 @@

# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
import asyncio
from inspect import signature

import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.openapi.docs import get_redoc_html, get_swagger_ui_html
from fastapi.openapi.utils import get_openapi
from fastapi.staticfiles import StaticFiles
from fastapi_events.handlers.local import local_handler
from fastapi_events.middleware import EventHandlerASGIMiddleware
from pydantic.schema import schema

from ..backend import Args
from .api.dependencies import ApiDependencies
from .api.routers import images, sessions, models
from .api.sockets import SocketIO
from .invocations import *
from .invocations.baseinvocation import BaseInvocation

# Create the app
# TODO: create this all in a method so configuration/etc. can be passed in?
app = FastAPI(title="Invoke AI", docs_url=None, redoc_url=None)

# Add event handler
event_handler_id: int = id(app)
app.add_middleware(
    EventHandlerASGIMiddleware,
    handlers=[
        local_handler
    ],  # TODO: consider doing this in services to support different configurations
    middleware_id=event_handler_id,
)

# Add CORS
# TODO: use configuration for this
origins = []
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

socket_io = SocketIO(app)

config = {}


# Add startup event to load dependencies
@app.on_event("startup")
async def startup_event():
    config = Args()
    config.parse_args()

    ApiDependencies.initialize(
        config=config, event_handler_id=event_handler_id
    )


# Shut down threads
@app.on_event("shutdown")
async def shutdown_event():
    ApiDependencies.shutdown()


# Include all routers
# TODO: REMOVE
# app.include_router(
#     invocation.invocation_router,
#     prefix = '/api')

app.include_router(sessions.session_router, prefix="/api")

app.include_router(images.images_router, prefix="/api")

app.include_router(models.models_router, prefix="/api")


# Build a custom OpenAPI to include all outputs
# TODO: can outputs be included on metadata of invocation schemas somehow?
def custom_openapi():
    if app.openapi_schema:
        return app.openapi_schema
    openapi_schema = get_openapi(
        title=app.title,
        description="An API for invoking AI image operations",
        version="1.0.0",
        routes=app.routes,
    )

    # Add all outputs
    all_invocations = BaseInvocation.get_invocations()
    output_types = set()
    output_type_titles = dict()
    for invoker in all_invocations:
        output_type = signature(invoker.invoke).return_annotation
        output_types.add(output_type)

    output_schemas = schema(output_types, ref_prefix="#/components/schemas/")
    for schema_key, output_schema in output_schemas["definitions"].items():
        openapi_schema["components"]["schemas"][schema_key] = output_schema

        # TODO: note that we assume the schema_key here is the TYPE.__name__
        # This could break in some cases, figure out a better way to do it
        output_type_titles[schema_key] = output_schema["title"]

    # Add a reference to the output type to additionalProperties of the invoker schema
    for invoker in all_invocations:
        invoker_name = invoker.__name__
        output_type = signature(invoker.invoke).return_annotation
        output_type_title = output_type_titles[output_type.__name__]
        invoker_schema = openapi_schema["components"]["schemas"][invoker_name]
        outputs_ref = {"$ref": f"#/components/schemas/{output_type_title}"}

        invoker_schema["output"] = outputs_ref

    app.openapi_schema = openapi_schema
    return app.openapi_schema


app.openapi = custom_openapi

# Override API doc favicons
app.mount("/static", StaticFiles(directory="static/dream_web"), name="static")


@app.get("/docs", include_in_schema=False)
def overridden_swagger():
    return get_swagger_ui_html(
        openapi_url=app.openapi_url,
        title=app.title,
        swagger_favicon_url="/static/favicon.ico",
    )


@app.get("/redoc", include_in_schema=False)
def overridden_redoc():
    return get_redoc_html(
        openapi_url=app.openapi_url,
        title=app.title,
        redoc_favicon_url="/static/favicon.ico",
    )


def invoke_api():
    # Start our own event loop for eventing usage
    # TODO: determine if there's a better way to do this
    loop = asyncio.new_event_loop()
    config = uvicorn.Config(app=app, host="0.0.0.0", port=9090, loop=loop)
    # Use access_log to turn off logging

    server = uvicorn.Server(config)
    loop.run_until_complete(server.serve())


if __name__ == "__main__":
    invoke_api()
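The custom_openapi() hook grafts each invocation's output schema into the invocation's own schema under an "output" key, which is how the frontend learns what each node produces. A sketch reading those references back out of the generated document; it assumes the server above is running and uses the requests package, which is not a project dependency:

import requests

spec = requests.get("http://localhost:9090/openapi.json").json()
for name, node_schema in spec["components"]["schemas"].items():
    # Only invocation schemas carry the grafted "output" reference.
    if "output" in node_schema:
        print(name, "->", node_schema["output"]["$ref"])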
invokeai/app/cli/commands.py (new file, 286 lines)
@@ -0,0 +1,286 @@

# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)

from abc import ABC, abstractmethod
import argparse
from typing import Any, Callable, Iterable, Literal, Union, get_args, get_origin, get_type_hints
from pydantic import BaseModel, Field
import networkx as nx
import matplotlib.pyplot as plt

from ..invocations.baseinvocation import BaseInvocation
from ..invocations.image import ImageField
from ..services.graph import GraphExecutionState, LibraryGraph, GraphInvocation, Edge
from ..services.invoker import Invoker


def add_field_argument(command_parser, name: str, field, default_override = None):
    default = default_override if default_override is not None else field.default if field.default_factory is None else field.default_factory()
    if get_origin(field.type_) == Literal:
        allowed_values = get_args(field.type_)
        allowed_types = set()
        for val in allowed_values:
            allowed_types.add(type(val))
        allowed_types_list = list(allowed_types)
        field_type = allowed_types_list[0] if len(allowed_types) == 1 else Union[allowed_types_list]  # type: ignore

        command_parser.add_argument(
            f"--{name}",
            dest=name,
            type=field_type,
            default=default,
            choices=allowed_values,
            help=field.field_info.description,
        )
    else:
        command_parser.add_argument(
            f"--{name}",
            dest=name,
            type=field.type_,
            default=default,
            help=field.field_info.description,
        )


def add_parsers(
    subparsers,
    commands: list[type],
    command_field: str = "type",
    exclude_fields: list[str] = ["id", "type"],
    add_arguments: Callable[[argparse.ArgumentParser], None]|None = None
):
    """Adds parsers for each command to the subparsers"""

    # Create subparsers for each command
    for command in commands:
        hints = get_type_hints(command)
        cmd_name = get_args(hints[command_field])[0]
        command_parser = subparsers.add_parser(cmd_name, help=command.__doc__)

        if add_arguments is not None:
            add_arguments(command_parser)

        # Convert all fields to arguments
        fields = command.__fields__  # type: ignore
        for name, field in fields.items():
            if name in exclude_fields:
                continue

            add_field_argument(command_parser, name, field)


def add_graph_parsers(
    subparsers,
    graphs: list[LibraryGraph],
    add_arguments: Callable[[argparse.ArgumentParser], None]|None = None
):
    for graph in graphs:
        command_parser = subparsers.add_parser(graph.name, help=graph.description)

        if add_arguments is not None:
            add_arguments(command_parser)

        # Add arguments for inputs
        for exposed_input in graph.exposed_inputs:
            node = graph.graph.get_node(exposed_input.node_path)
            field = node.__fields__[exposed_input.field]
            default_override = getattr(node, exposed_input.field)
            add_field_argument(command_parser, exposed_input.alias, field, default_override)


class CliContext:
    invoker: Invoker
    session: GraphExecutionState
    parser: argparse.ArgumentParser
    defaults: dict[str, Any]
    graph_nodes: dict[str, str]
    nodes_added: list[str]

    def __init__(self, invoker: Invoker, session: GraphExecutionState, parser: argparse.ArgumentParser):
        self.invoker = invoker
        self.session = session
        self.parser = parser
        self.defaults = dict()
        self.graph_nodes = dict()
        self.nodes_added = list()

    def get_session(self):
        self.session = self.invoker.services.graph_execution_manager.get(self.session.id)
        return self.session

    def reset(self):
        self.session = self.invoker.create_execution_state()
        self.graph_nodes = dict()
        self.nodes_added = list()
        # Leave defaults unchanged

    def add_node(self, node: BaseInvocation):
        self.get_session()
        self.session.graph.add_node(node)
        self.nodes_added.append(node.id)
        self.invoker.services.graph_execution_manager.set(self.session)

    def add_edge(self, edge: Edge):
        self.get_session()
        self.session.add_edge(edge)
        self.invoker.services.graph_execution_manager.set(self.session)


class ExitCli(Exception):
    """Exception to exit the CLI"""
    pass


class BaseCommand(ABC, BaseModel):
    """A CLI command"""

    # All commands must include a type name like this:
    # type: Literal['your_command_name'] = 'your_command_name'

    @classmethod
    def get_all_subclasses(cls):
        subclasses = []
        toprocess = [cls]
        while len(toprocess) > 0:
            next = toprocess.pop(0)
            next_subclasses = next.__subclasses__()
            subclasses.extend(next_subclasses)
            toprocess.extend(next_subclasses)
        return subclasses

    @classmethod
    def get_commands(cls):
        return tuple(BaseCommand.get_all_subclasses())

    @classmethod
    def get_commands_map(cls):
        # Get the type strings out of the literals and into a dictionary
        return dict(map(lambda t: (get_args(get_type_hints(t)['type'])[0], t), BaseCommand.get_all_subclasses()))

    @abstractmethod
    def run(self, context: CliContext) -> None:
        """Run the command. Raise ExitCli to exit."""
        pass


class ExitCommand(BaseCommand):
    """Exits the CLI"""
    type: Literal['exit'] = 'exit'

    def run(self, context: CliContext) -> None:
        raise ExitCli()


class HelpCommand(BaseCommand):
    """Shows help"""
    type: Literal['help'] = 'help'

    def run(self, context: CliContext) -> None:
        context.parser.print_help()


def get_graph_execution_history(
    graph_execution_state: GraphExecutionState,
) -> Iterable[str]:
    """Gets the history of fully-executed invocations for a graph execution"""
    return (
        n
        for n in reversed(graph_execution_state.executed_history)
        if n in graph_execution_state.graph.nodes
    )


def get_invocation_command(invocation) -> str:
    fields = invocation.__fields__.items()
    type_hints = get_type_hints(type(invocation))
    command = [invocation.type]
    for name, field in fields:
        if name in ["id", "type"]:
            continue

        # TODO: add links

        # Skip image fields when serializing command
        type_hint = type_hints.get(name) or None
        if type_hint is ImageField or ImageField in get_args(type_hint):
            continue

        field_value = getattr(invocation, name)
        field_default = field.default
        if field_value != field_default:
            if type_hint is str or str in get_args(type_hint):
                command.append(f'--{name} "{field_value}"')
            else:
                command.append(f"--{name} {field_value}")

    return " ".join(command)


class HistoryCommand(BaseCommand):
    """Shows the invocation history"""
    type: Literal['history'] = 'history'

    # Inputs
    # fmt: off
    count: int = Field(default=5, gt=0, description="The number of history entries to show")
    # fmt: on

    def run(self, context: CliContext) -> None:
        history = list(get_graph_execution_history(context.get_session()))
        for i in range(min(self.count, len(history))):
            entry_id = history[-1 - i]
            entry = context.get_session().graph.get_node(entry_id)
            print(f"{entry_id}: {get_invocation_command(entry)}")


class SetDefaultCommand(BaseCommand):
    """Sets a default value for a field"""
    type: Literal['default'] = 'default'

    # Inputs
    # fmt: off
    field: str = Field(description="The field to set the default for")
    value: str = Field(description="The value to set the default to, or None to clear the default")
    # fmt: on

    def run(self, context: CliContext) -> None:
        if self.value is None:
            if self.field in context.defaults:
                del context.defaults[self.field]
        else:
            context.defaults[self.field] = self.value


class DrawGraphCommand(BaseCommand):
    """Debugs a graph"""
    type: Literal['draw_graph'] = 'draw_graph'

    def run(self, context: CliContext) -> None:
        session: GraphExecutionState = context.invoker.services.graph_execution_manager.get(context.session.id)
        nxgraph = session.graph.nx_graph_flat()

        # Draw the networkx graph
        plt.figure(figsize=(20, 20))
        pos = nx.spectral_layout(nxgraph)
        nx.draw_networkx_nodes(nxgraph, pos, node_size=1000)
        nx.draw_networkx_edges(nxgraph, pos, width=2)
        nx.draw_networkx_labels(nxgraph, pos, font_size=20, font_family="sans-serif")
        plt.axis("off")
        plt.show()


class DrawExecutionGraphCommand(BaseCommand):
    """Debugs an execution graph"""
    type: Literal['draw_xgraph'] = 'draw_xgraph'

    def run(self, context: CliContext) -> None:
        session: GraphExecutionState = context.invoker.services.graph_execution_manager.get(context.session.id)
        nxgraph = session.execution_graph.nx_graph_flat()

        # Draw the networkx graph
        plt.figure(figsize=(20, 20))
        pos = nx.spectral_layout(nxgraph)
        nx.draw_networkx_nodes(nxgraph, pos, node_size=1000)
        nx.draw_networkx_edges(nxgraph, pos, width=2)
        nx.draw_networkx_labels(nxgraph, pos, font_size=20, font_family="sans-serif")
        plt.axis("off")
        plt.show()
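Because BaseCommand discovers its subclasses at runtime via get_all_subclasses(), registering a new CLI command is just a matter of defining a subclass with a Literal type tag and a run() method; get_commands_map() then picks it up with no extra wiring. A sketch, where the command itself is a hypothetical example rather than one shipped in the repo:

from typing import Literal

from pydantic import Field

from invokeai.app.cli.commands import BaseCommand, CliContext


class ResetCommand(BaseCommand):
    """Resets the current session (hypothetical example command)"""
    type: Literal['reset'] = 'reset'

    # fmt: off
    keep_defaults: bool = Field(default=True, description="Whether to keep the current field defaults")
    # fmt: on

    def run(self, context: CliContext) -> None:
        # CliContext.reset() deliberately preserves defaults, so clear them
        # first if the user asked for a full reset.
        if not self.keep_defaults:
            context.defaults = dict()
        context.reset()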
167
invokeai/app/cli/completer.py
Normal file
@ -0,0 +1,167 @@
|
|||||||
|
"""
|
||||||
|
Readline helper functions for cli_app.py
|
||||||
|
You may import the global singleton `completer` to get access to the
|
||||||
|
completer object.
|
||||||
|
"""
|
||||||
|
import atexit
|
||||||
|
import readline
|
||||||
|
import shlex
|
||||||
|
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Dict, Literal, get_args, get_type_hints, get_origin
|
||||||
|
|
||||||
|
from ...backend import ModelManager, Globals
|
||||||
|
from ..invocations.baseinvocation import BaseInvocation
|
from .commands import BaseCommand

# singleton object, class variable
completer = None


class Completer(object):

    def __init__(self, model_manager: ModelManager):
        self.commands = self.get_commands()
        self.matches = None
        self.linebuffer = None
        self.manager = model_manager
        return

    def complete(self, text, state):
        """
        Complete commands and switches from the node CLI command line.
        Switches are determined in a context-specific manner.
        """

        buffer = readline.get_line_buffer()
        if state == 0:
            options = None
            try:
                current_command, current_switch = self.get_current_command(buffer)
                options = self.get_command_options(current_command, current_switch)
            except IndexError:
                pass
            options = options or list(self.parse_commands().keys())

            if not text:  # first time
                self.matches = options
            else:
                self.matches = [s for s in options if s and s.startswith(text)]

        try:
            match = self.matches[state]
        except IndexError:
            match = None
        return match

    @classmethod
    def get_commands(cls) -> List[object]:
        """
        Return a list of all the client commands and invocations.
        """
        return BaseCommand.get_commands() + BaseInvocation.get_invocations()

    def get_current_command(self, buffer: str) -> tuple[str, str]:
        """
        Parse the readline buffer to find the most recent command and its switch.
        """
        if len(buffer) == 0:
            return None, None
        tokens = shlex.split(buffer)
        command = None
        switch = None
        for t in tokens:
            if t[0].isalpha():
                if switch is None:
                    command = t
            else:
                switch = t
        # don't try to autocomplete switches that are already complete
        if switch and buffer.endswith(' '):
            switch = None
        return command or '', switch or ''

    def parse_commands(self) -> Dict[str, List[str]]:
        """
        Return a dict in which the keys are the command name
        and the values are the parameters the command takes.
        """
        result = dict()
        for command in self.commands:
            hints = get_type_hints(command)
            name = get_args(hints['type'])[0]
            result.update({name: hints})
        return result

    def get_command_options(self, command: str, switch: str) -> List[str]:
        """
        Return all the parameters that can be passed to the command as
        command-line switches. Returns None if the command is unrecognized.
        """
        parsed_commands = self.parse_commands()
        if command not in parsed_commands:
            return None

        # handle switches in the format "-foo=bar"
        argument = None
        if switch and '=' in switch:
            switch, argument = switch.split('=')

        parameter = switch.strip('-')
        if parameter in parsed_commands[command]:
            if argument is None:
                return self.get_parameter_options(parameter, parsed_commands[command][parameter])
            else:
                return [f"--{parameter}={x}" for x in self.get_parameter_options(parameter, parsed_commands[command][parameter])]
        else:
            return [f"--{x}" for x in parsed_commands[command].keys()]

    def get_parameter_options(self, parameter: str, typehint) -> List[str]:
        """
        Given a parameter type (such as Literal), offers autocompletions.
        """
        if get_origin(typehint) == Literal:
            return get_args(typehint)
        if parameter == 'model':
            return self.manager.model_names()

    def _pre_input_hook(self):
        if self.linebuffer:
            readline.insert_text(self.linebuffer)
            readline.redisplay()
            self.linebuffer = None


def set_autocompleter(model_manager: ModelManager) -> Completer:
    global completer

    if completer:
        return completer

    completer = Completer(model_manager)

    readline.set_completer(completer.complete)
    # pyreadline3 does not have a set_auto_history() method
    try:
        readline.set_auto_history(True)
    except:
        pass
    readline.set_pre_input_hook(completer._pre_input_hook)
    readline.set_completer_delims(" ")
    readline.parse_and_bind("tab: complete")
    readline.parse_and_bind("set print-completions-horizontally off")
    readline.parse_and_bind("set page-completions on")
    readline.parse_and_bind("set skip-completed-text on")
    readline.parse_and_bind("set show-all-if-ambiguous on")

    histfile = Path(Globals.root, ".invoke_history")
    try:
        readline.read_history_file(histfile)
        readline.set_history_length(1000)
    except FileNotFoundError:
        pass
    except OSError:  # file likely corrupted
        newname = f"{histfile}.old"
        print(
            f"## Your history file {histfile} couldn't be loaded and may be corrupted. Renaming it to {newname}"
        )
        histfile.replace(Path(newname))
    atexit.register(readline.write_history_file, histfile)
    return completer
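For reference, a minimal sketch of the readline completion protocol that `Completer.complete` implements: readline calls `complete(text, 0)`, then `complete(text, 1)`, and so on, until the function returns `None`. The class and word list below are illustrative, not part of this diff:

```python
# Minimal sketch of the readline completer protocol: state 0 computes the
# full match list; subsequent states walk it until IndexError ends the run.
class DemoCompleter:
    def __init__(self, words):
        self.words = words
        self.matches = []

    def complete(self, text, state):
        if state == 0:  # first call for this text: compute all matches
            self.matches = [w for w in self.words if w.startswith(text)]
        try:
            return self.matches[state]
        except IndexError:
            return None  # tells readline there are no more matches

completer = DemoCompleter(["txt2img", "img2img", "inpaint"])
assert completer.complete("i", 0) == "img2img"
assert completer.complete("i", 1) == "inpaint"
assert completer.complete("i", 2) is None
```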
invokeai/app/cli_app.py (new file, 386 lines)
@@ -0,0 +1,386 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

import argparse
import os
import re
import shlex
import time
from typing import (
    Union,
    get_type_hints,
)

from pydantic import BaseModel
from pydantic.fields import Field

from invokeai.app.services.metadata import PngMetadataService

from .services.default_graphs import create_system_graphs, default_text_to_image_graph_id
from .services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage

from ..backend import Args
from .cli.commands import BaseCommand, CliContext, ExitCli, add_graph_parsers, add_parsers, get_graph_execution_history
from .cli.completer import set_autocompleter
from .invocations import *
from .invocations.baseinvocation import BaseInvocation
from .services.events import EventServiceBase
from .services.model_manager_initializer import get_model_manager
from .services.restoration_services import RestorationServices
from .services.graph import Edge, EdgeConnection, ExposedNodeInput, GraphExecutionState, GraphInvocation, LibraryGraph, are_connection_types_compatible
from .services.image_storage import DiskImageStorage
from .services.invocation_queue import MemoryInvocationQueue
from .services.invocation_services import InvocationServices
from .services.invoker import Invoker
from .services.processor import DefaultInvocationProcessor
from .services.sqlite import SqliteItemStorage


class CliCommand(BaseModel):
    command: Union[BaseCommand.get_commands() + BaseInvocation.get_invocations()] = Field(discriminator="type")  # type: ignore
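The `CliCommand` model leans on pydantic's discriminated unions: the literal `type` field decides which invocation or command class the parsed argparse dict is validated against. A self-contained sketch of that mechanism (pydantic v1.9+ style; the command classes here are made up for illustration):

```python
# The "type" discriminator selects which model validates the input dict.
from typing import Literal, Union
from pydantic import BaseModel, Field

class RangeCmd(BaseModel):
    type: Literal["range"] = "range"
    start: int = 0

class BlurCmd(BaseModel):
    type: Literal["blur"] = "blur"
    radius: float = 8.0

class Cmd(BaseModel):
    command: Union[RangeCmd, BlurCmd] = Field(discriminator="type")

cmd = Cmd(command={"type": "blur", "radius": 2.0})
assert isinstance(cmd.command, BlurCmd)
```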
class InvalidArgs(Exception):
    pass
def add_invocation_args(command_parser):
    # Add linking capability
    command_parser.add_argument(
        "--link",
        "-l",
        action="append",
        nargs=3,
        help="A link in the format 'source_node source_field dest_field'. source_node can be relative to history (e.g. -1)",
    )

    command_parser.add_argument(
        "--link_node",
        "-ln",
        action="append",
        help="A link from all fields in the specified node. Node can be relative to history (e.g. -1)",
    )


def get_command_parser(services: InvocationServices) -> argparse.ArgumentParser:
    # Create invocation parser
    parser = argparse.ArgumentParser()

    def exit(*args, **kwargs):
        raise InvalidArgs

    parser.exit = exit
    subparsers = parser.add_subparsers(dest="type")

    # Create subparsers for each invocation
    invocations = BaseInvocation.get_all_subclasses()
    add_parsers(subparsers, invocations, add_arguments=add_invocation_args)

    # Create subparsers for each command
    commands = BaseCommand.get_all_subclasses()
    add_parsers(subparsers, commands, exclude_fields=["type"])

    # Create subparsers for exposed CLI graphs
    # TODO: add a way to identify these graphs
    text_to_image = services.graph_library.get(default_text_to_image_graph_id)
    add_graph_parsers(subparsers, [text_to_image], add_arguments=add_invocation_args)

    return parser
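Replacing `parser.exit` is what keeps argparse from killing the REPL: on bad input argparse calls `parser.error()`, which calls `parser.exit()`, so raising from the override lets the caller catch the failure instead of the process terminating. A standalone sketch, with `BadInput` standing in for `InvalidArgs`:

```python
import argparse

class BadInput(Exception):  # stand-in for InvalidArgs
    pass

def _no_exit(*args, **kwargs):
    raise BadInput()

parser = argparse.ArgumentParser()
parser.add_argument("--steps", type=int)
parser.exit = _no_exit  # argparse calls this instead of sys.exit on error

try:
    parser.parse_args(["--steps", "not-a-number"])
except BadInput:
    print("caught bad input; the REPL loop keeps running")
```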
class NodeField:
    alias: str
    node_path: str
    field: str
    field_type: type

    def __init__(self, alias: str, node_path: str, field: str, field_type: type):
        self.alias = alias
        self.node_path = node_path
        self.field = field
        self.field_type = field_type


def fields_from_type_hints(hints: dict[str, type], node_path: str) -> dict[str, NodeField]:
    return {k: NodeField(alias=k, node_path=node_path, field=k, field_type=v) for k, v in hints.items()}


def get_node_input_field(graph: LibraryGraph, field_alias: str, node_id: str) -> NodeField:
    """Gets the node field for the specified field alias"""
    exposed_input = next(e for e in graph.exposed_inputs if e.alias == field_alias)
    node_type = type(graph.graph.get_node(exposed_input.node_path))
    return NodeField(alias=exposed_input.alias, node_path=f'{node_id}.{exposed_input.node_path}', field=exposed_input.field, field_type=get_type_hints(node_type)[exposed_input.field])


def get_node_output_field(graph: LibraryGraph, field_alias: str, node_id: str) -> NodeField:
    """Gets the node field for the specified field alias"""
    exposed_output = next(e for e in graph.exposed_outputs if e.alias == field_alias)
    node_type = type(graph.graph.get_node(exposed_output.node_path))
    node_output_type = node_type.get_output_type()
    return NodeField(alias=exposed_output.alias, node_path=f'{node_id}.{exposed_output.node_path}', field=exposed_output.field, field_type=get_type_hints(node_output_type)[exposed_output.field])


def get_node_inputs(invocation: BaseInvocation, context: CliContext) -> dict[str, NodeField]:
    """Gets the inputs for the specified invocation from the context"""
    node_type = type(invocation)
    if node_type is not GraphInvocation:
        return fields_from_type_hints(get_type_hints(node_type), invocation.id)
    else:
        graph: LibraryGraph = context.invoker.services.graph_library.get(context.graph_nodes[invocation.id])
        return {e.alias: get_node_input_field(graph, e.alias, invocation.id) for e in graph.exposed_inputs}


def get_node_outputs(invocation: BaseInvocation, context: CliContext) -> dict[str, NodeField]:
    """Gets the outputs for the specified invocation from the context"""
    node_type = type(invocation)
    if node_type is not GraphInvocation:
        return fields_from_type_hints(get_type_hints(node_type.get_output_type()), invocation.id)
    else:
        graph: LibraryGraph = context.invoker.services.graph_library.get(context.graph_nodes[invocation.id])
        return {e.alias: get_node_output_field(graph, e.alias, invocation.id) for e in graph.exposed_outputs}
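`fields_from_type_hints` treats a node's annotations as its linkable-field inventory. A toy illustration (the `ToyNode` class is hypothetical) of what `get_type_hints` hands back:

```python
# get_type_hints() returns every annotated field on the class, which then
# doubles as the set of fields edges can attach to.
from typing import get_type_hints

class ToyNode:
    id: str
    prompt: str
    steps: int

print(get_type_hints(ToyNode))
# {'id': <class 'str'>, 'prompt': <class 'str'>, 'steps': <class 'int'>}
```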
def generate_matching_edges(
    a: BaseInvocation, b: BaseInvocation, context: CliContext
) -> list[Edge]:
    """Generates all possible edges between two invocations"""
    afields = get_node_outputs(a, context)
    bfields = get_node_inputs(b, context)

    matching_fields = set(afields.keys()).intersection(bfields.keys())

    # Remove invalid fields
    invalid_fields = set(["type", "id"])
    matching_fields = matching_fields.difference(invalid_fields)

    # Validate types
    matching_fields = [f for f in matching_fields if are_connection_types_compatible(afields[f].field_type, bfields[f].field_type)]

    edges = [
        Edge(
            source=EdgeConnection(node_id=afields[alias].node_path, field=afields[alias].field),
            destination=EdgeConnection(node_id=bfields[alias].node_path, field=bfields[alias].field)
        )
        for alias in matching_fields
    ]
    return edges
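A toy illustration of the auto-linking rule `generate_matching_edges` implements: candidate edges are the output field names of one node intersected with the input field names of the next, minus the bookkeeping fields `type` and `id` (type compatibility is then checked per field):

```python
# Name-based edge matching on plain sets; real code also filters by type.
a_outputs = {"type", "image", "width", "height"}
b_inputs = {"type", "id", "image", "radius"}

matching = (a_outputs & b_inputs) - {"type", "id"}
print(matching)  # {'image'} -> one image-to-image edge would be generated
```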
class SessionError(Exception):
    """Raised when a session error has occurred"""
    pass


def invoke_all(context: CliContext):
    """Runs all invocations in the specified session"""
    context.invoker.invoke(context.session, invoke_all=True)
    while not context.get_session().is_complete():
        # Wait some time
        time.sleep(0.1)

    # Print any errors
    if context.session.has_error():
        for n in context.session.errors:
            print(
                f"Error in node {n} (source node {context.session.prepared_source_mapping[n]}): {context.session.errors[n]}"
            )

        raise SessionError()
def invoke_cli():
    config = Args()
    config.parse_args()
    model_manager = get_model_manager(config)

    # This initializes the autocompleter and returns it.
    # Currently nothing is done with the returned Completer
    # object, but the object can be used to change autocompletion
    # behavior on the fly, if desired.
    completer = set_autocompleter(model_manager)

    events = EventServiceBase()

    metadata = PngMetadataService()

    output_folder = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "../../../outputs")
    )

    # TODO: build a file/path manager?
    db_location = os.path.join(output_folder, "invokeai.db")

    services = InvocationServices(
        model_manager=model_manager,
        events=events,
        latents=ForwardCacheLatentsStorage(DiskLatentsStorage(f'{output_folder}/latents')),
        images=DiskImageStorage(f'{output_folder}/images', metadata_service=metadata),
        metadata=metadata,
        queue=MemoryInvocationQueue(),
        graph_library=SqliteItemStorage[LibraryGraph](
            filename=db_location, table_name="graphs"
        ),
        graph_execution_manager=SqliteItemStorage[GraphExecutionState](
            filename=db_location, table_name="graph_executions"
        ),
        processor=DefaultInvocationProcessor(),
        restoration=RestorationServices(config),
    )

    system_graphs = create_system_graphs(services.graph_library)
    system_graph_names = set([g.name for g in system_graphs])

    invoker = Invoker(services)
    session: GraphExecutionState = invoker.create_execution_state()
    parser = get_command_parser(services)

    re_negid = re.compile('^-[0-9]+$')

    # Uncomment to print out previous sessions at startup
    # print(services.session_manager.list())

    context = CliContext(invoker, session, parser)

    while True:
        try:
            cmd_input = input("invoke> ")
        except (KeyboardInterrupt, EOFError):
            # Ctrl-c exits
            break

        try:
            # Refresh the state of the session
            #history = list(get_graph_execution_history(context.session))
            history = list(reversed(context.nodes_added))

            # Split the command for piping
            cmds = cmd_input.split("|")
            start_id = len(context.nodes_added)
            current_id = start_id
            new_invocations = list()
            for cmd in cmds:
                if cmd is None or cmd.strip() == "":
                    raise InvalidArgs("Empty command")

                # Parse args to create invocation
                args = vars(context.parser.parse_args(shlex.split(cmd.strip())))

                # Override defaults
                for field_name, field_default in context.defaults.items():
                    if field_name in args:
                        args[field_name] = field_default

                # Parse invocation
                command: CliCommand = None  # type:ignore
                system_graph: LibraryGraph|None = None
                if args['type'] in system_graph_names:
                    system_graph = next(filter(lambda g: g.name == args['type'], system_graphs))
                    invocation = GraphInvocation(graph=system_graph.graph, id=str(current_id))
                    for exposed_input in system_graph.exposed_inputs:
                        if exposed_input.alias in args:
                            node = invocation.graph.get_node(exposed_input.node_path)
                            field = exposed_input.field
                            setattr(node, field, args[exposed_input.alias])
                    command = CliCommand(command=invocation)
                    context.graph_nodes[invocation.id] = system_graph.id
                else:
                    args["id"] = current_id
                    command = CliCommand(command=args)

                if command is None:
                    continue

                # Run any CLI commands immediately
                if isinstance(command.command, BaseCommand):
                    # Invoke all current nodes to preserve operation order
                    invoke_all(context)

                    # Run the command
                    command.command.run(context)
                    continue

                # TODO: handle linking with library graphs
                # Pipe previous command output (if there was a previous command)
                edges: list[Edge] = list()
                if len(history) > 0 or current_id != start_id:
                    from_id = (
                        history[0] if current_id == start_id else str(current_id - 1)
                    )
                    from_node = (
                        next(filter(lambda n: n[0].id == from_id, new_invocations))[0]
                        if current_id != start_id
                        else context.session.graph.get_node(from_id)
                    )
                    matching_edges = generate_matching_edges(
                        from_node, command.command, context
                    )
                    edges.extend(matching_edges)

                # Parse provided links
                if "link_node" in args and args["link_node"]:
                    for link in args["link_node"]:
                        node_id = link
                        if re_negid.match(node_id):
                            node_id = str(current_id + int(node_id))

                        link_node = context.session.graph.get_node(node_id)
                        matching_edges = generate_matching_edges(
                            link_node, command.command, context
                        )
                        matching_destinations = [e.destination for e in matching_edges]
                        edges = [e for e in edges if e.destination not in matching_destinations]
                        edges.extend(matching_edges)

                if "link" in args and args["link"]:
                    for link in args["link"]:
                        edges = [e for e in edges if e.destination.node_id != command.command.id or e.destination.field != link[2]]

                        node_id = link[0]
                        if re_negid.match(node_id):
                            node_id = str(current_id + int(node_id))

                        # TODO: handle missing input/output
                        node_output = get_node_outputs(context.session.graph.get_node(node_id), context)[link[1]]
                        node_input = get_node_inputs(command.command, context)[link[2]]

                        edges.append(
                            Edge(
                                source=EdgeConnection(node_id=node_output.node_path, field=node_output.field),
                                destination=EdgeConnection(node_id=node_input.node_path, field=node_input.field)
                            )
                        )

                new_invocations.append((command.command, edges))

                current_id = current_id + 1

                # Add the node to the session
                context.add_node(command.command)
                for edge in edges:
                    print(edge)
                    context.add_edge(edge)

            # Execute all remaining nodes
            invoke_all(context)

        except InvalidArgs:
            print('Invalid command, use "help" to list commands')
            continue

        except SessionError:
            # Start a new session
            print("Session error: creating a new session")
            context.reset()

        except ExitCli:
            break

        except SystemExit:
            continue

    invoker.stop()


if __name__ == "__main__":
    invoke_cli()
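A quick check of the relative-id convention the REPL accepts in `--link`/`--link_node`: a negative id such as `-1` is resolved against `current_id`, so it refers to the node added just before the current one:

```python
# Resolution arithmetic for relative node ids, exactly as in invoke_cli().
import re

re_negid = re.compile('^-[0-9]+$')
current_id = 5
node_id = "-1"
if re_negid.match(node_id):
    node_id = str(current_id + int(node_id))
assert node_id == "4"
```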
invokeai/app/invocations/__init__.py (new file, 12 lines)
@@ -0,0 +1,12 @@
import os

__all__ = []

dirname = os.path.dirname(os.path.abspath(__file__))
for f in os.listdir(dirname):
    if (
        f != "__init__.py"
        and os.path.isfile("%s/%s" % (dirname, f))
        and f[-3:] == ".py"
    ):
        __all__.append(f[:-3])
invokeai/app/invocations/baseinvocation.py (new file, 131 lines)
@@ -0,0 +1,131 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from abc import ABC, abstractmethod
from inspect import signature
from typing import get_args, get_type_hints, Dict, List, Literal, TypedDict

from pydantic import BaseModel, Field

from ..services.invocation_services import InvocationServices


class InvocationContext:
    services: InvocationServices
    graph_execution_state_id: str

    def __init__(self, services: InvocationServices, graph_execution_state_id: str):
        self.services = services
        self.graph_execution_state_id = graph_execution_state_id


class BaseInvocationOutput(BaseModel):
    """Base class for all invocation outputs"""

    # All outputs must include a type name like this:
    # type: Literal['your_output_name']

    @classmethod
    def get_all_subclasses_tuple(cls):
        subclasses = []
        toprocess = [cls]
        while len(toprocess) > 0:
            next = toprocess.pop(0)
            next_subclasses = next.__subclasses__()
            subclasses.extend(next_subclasses)
            toprocess.extend(next_subclasses)
        return tuple(subclasses)


class BaseInvocation(ABC, BaseModel):
    """A node to process inputs and produce outputs.
    May use dependency injection in __init__ to receive providers.
    """

    # All invocations must include a type name like this:
    # type: Literal['your_invocation_name']

    @classmethod
    def get_all_subclasses(cls):
        subclasses = []
        toprocess = [cls]
        while len(toprocess) > 0:
            next = toprocess.pop(0)
            next_subclasses = next.__subclasses__()
            subclasses.extend(next_subclasses)
            toprocess.extend(next_subclasses)
        return subclasses

    @classmethod
    def get_invocations(cls):
        return tuple(BaseInvocation.get_all_subclasses())

    @classmethod
    def get_invocations_map(cls):
        # Get the type strings out of the literals and into a dictionary
        return dict(map(lambda t: (get_args(get_type_hints(t)['type'])[0], t), BaseInvocation.get_all_subclasses()))

    @classmethod
    def get_output_type(cls):
        return signature(cls.invoke).return_annotation

    @abstractmethod
    def invoke(self, context: InvocationContext) -> BaseInvocationOutput:
        """Invoke with provided context and return outputs."""
        pass

    #fmt: off
    id: str = Field(description="The id of this node. Must be unique among all nodes.")
    #fmt: on


# TODO: figure out a better way to provide these hints
# TODO: when we can upgrade to python 3.11, we can use the `NotRequired` type instead of `total=False`
class UIConfig(TypedDict, total=False):
    type_hints: Dict[
        str,
        Literal[
            "integer",
            "float",
            "boolean",
            "string",
            "enum",
            "image",
            "latents",
            "model",
        ],
    ]
    tags: List[str]
    title: str

class CustomisedSchemaExtra(TypedDict):
    ui: UIConfig


class InvocationConfig(BaseModel.Config):
    """Customizes pydantic's BaseModel.Config class for use by Invocations.

    Provide `schema_extra` a `ui` dict to add hints for generated UIs.

    `tags`
    - A list of strings, used to categorise invocations.

    `type_hints`
    - A dict of field types which override the types in the invocation definition.
    - Each key should be the name of one of the invocation's fields.
    - Each value should be one of the valid types:
      - `integer`, `float`, `boolean`, `string`, `enum`, `image`, `latents`, `model`

    ```python
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["stable-diffusion", "image"],
                "type_hints": {
                    "initial_image": "image",
                },
            },
        }
    ```
    """

    schema_extra: CustomisedSchemaExtra
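A minimal sketch of a custom node under the contract this file defines: a unique `Literal` type tag, pydantic fields for inputs, and an `invoke()` whose return annotation doubles as the node's output type. It assumes `BaseInvocation`, `BaseInvocationOutput`, and `InvocationContext` are importable from this module; the `upcase` names are invented for illustration:

```python
from typing import Literal
from pydantic import Field

class UpcaseOutput(BaseInvocationOutput):
    type: Literal["upcase_output"] = "upcase_output"
    text: str = Field(default="", description="The upper-cased text")

class UpcaseInvocation(BaseInvocation):
    """Upper-cases a string."""
    type: Literal["upcase"] = "upcase"

    # Inputs
    text: str = Field(default="", description="The text to upper-case")

    def invoke(self, context: InvocationContext) -> UpcaseOutput:
        # get_output_type() reads UpcaseOutput off this return annotation
        return UpcaseOutput(text=self.text.upper())
```

Because `get_all_subclasses()` walks `__subclasses__()`, merely defining the class registers it with the CLI parser and the autocompleter.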
invokeai/app/invocations/collections.py (new file, 64 lines)
@@ -0,0 +1,64 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)

from typing import Literal, Optional

import numpy as np
import numpy.random
from pydantic import Field

from .baseinvocation import (
    BaseInvocation,
    InvocationConfig,
    InvocationContext,
    BaseInvocationOutput,
)


class IntCollectionOutput(BaseInvocationOutput):
    """A collection of integers"""

    type: Literal["int_collection"] = "int_collection"

    # Outputs
    collection: list[int] = Field(default=[], description="The int collection")


class RangeInvocation(BaseInvocation):
    """Creates a range"""

    type: Literal["range"] = "range"

    # Inputs
    start: int = Field(default=0, description="The start of the range")
    stop: int = Field(default=10, description="The stop of the range")
    step: int = Field(default=1, description="The step of the range")

    def invoke(self, context: InvocationContext) -> IntCollectionOutput:
        return IntCollectionOutput(
            collection=list(range(self.start, self.stop, self.step))
        )


class RandomRangeInvocation(BaseInvocation):
    """Creates a collection of random numbers"""

    type: Literal["random_range"] = "random_range"

    # Inputs
    low: int = Field(default=0, description="The inclusive low value")
    high: int = Field(
        default=np.iinfo(np.int32).max, description="The exclusive high value"
    )
    size: int = Field(default=1, description="The number of values to generate")
    seed: Optional[int] = Field(
        ge=0,
        le=np.iinfo(np.int32).max,
        description="The seed for the RNG",
        default_factory=lambda: numpy.random.randint(0, np.iinfo(np.int32).max),
    )

    def invoke(self, context: InvocationContext) -> IntCollectionOutput:
        rng = np.random.default_rng(self.seed)
        return IntCollectionOutput(
            collection=list(rng.integers(low=self.low, high=self.high, size=self.size))
        )
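A quick check of the seeding behavior `RandomRangeInvocation` relies on: `np.random.default_rng(seed)` is a reproducible stream, so the same seed always yields the same collection:

```python
import numpy as np

a = np.random.default_rng(42).integers(low=0, high=100, size=5)
b = np.random.default_rng(42).integers(low=0, high=100, size=5)
assert (a == b).all()
```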
invokeai/app/invocations/cv.py (new file, 69 lines)
@@ -0,0 +1,69 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from typing import Literal

import cv2 as cv
import numpy
from PIL import Image, ImageOps
from pydantic import BaseModel, Field

from invokeai.app.models.image import ImageField, ImageType
from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
from .image import ImageOutput, build_image_output


class CvInvocationConfig(BaseModel):
    """Helper class to provide all OpenCV invocations with additional config"""

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["cv", "image"],
            },
        }


class CvInpaintInvocation(BaseInvocation, CvInvocationConfig):
    """Simple inpaint using opencv."""
    #fmt: off
    type: Literal["cv_inpaint"] = "cv_inpaint"

    # Inputs
    image: ImageField = Field(default=None, description="The image to inpaint")
    mask: ImageField = Field(default=None, description="The mask to use when inpainting")
    #fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )
        mask = context.services.images.get(self.mask.image_type, self.mask.image_name)

        # Convert to cv image/mask
        # TODO: consider making these utility functions
        cv_image = cv.cvtColor(numpy.array(image.convert("RGB")), cv.COLOR_RGB2BGR)
        cv_mask = numpy.array(ImageOps.invert(mask))

        # Inpaint
        cv_inpainted = cv.inpaint(cv_image, cv_mask, 3, cv.INPAINT_TELEA)

        # Convert back to Pillow
        # TODO: consider making a utility function
        image_inpainted = Image.fromarray(cv.cvtColor(cv_inpainted, cv.COLOR_BGR2RGB))

        image_type = ImageType.INTERMEDIATE
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )

        metadata = context.services.metadata.build_metadata(
            session_id=context.graph_execution_state_id, node=self
        )

        context.services.images.save(image_type, image_name, image_inpainted, metadata)
        return build_image_output(
            image_type=image_type,
            image_name=image_name,
            image=image_inpainted,
        )
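A standalone sketch of the PIL-to-OpenCV conversions used in `CvInpaintInvocation`: OpenCV expects BGR uint8 arrays while Pillow works in RGB, so each hop swaps the channel order:

```python
import numpy
import cv2 as cv
from PIL import Image

pil_image = Image.new("RGB", (64, 64), color=(255, 0, 0))  # pure red
cv_image = cv.cvtColor(numpy.array(pil_image), cv.COLOR_RGB2BGR)
assert tuple(cv_image[0, 0]) == (0, 0, 255)  # red is last in BGR order

round_tripped = Image.fromarray(cv.cvtColor(cv_image, cv.COLOR_BGR2RGB))
assert round_tripped.getpixel((0, 0)) == (255, 0, 0)
```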
invokeai/app/invocations/generate.py (new file, 281 lines)
@@ -0,0 +1,281 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from functools import partial
from typing import Literal, Optional, Union

import numpy as np
from torch import Tensor

from pydantic import BaseModel, Field

from invokeai.app.models.image import ImageField, ImageType
from invokeai.app.invocations.util.choose_model import choose_model
from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
from .image import ImageOutput, build_image_output
from ...backend.generator import Txt2Img, Img2Img, Inpaint, InvokeAIGenerator
from ...backend.stable_diffusion import PipelineIntermediateState
from ..util.step_callback import stable_diffusion_step_callback

SAMPLER_NAME_VALUES = Literal[tuple(InvokeAIGenerator.schedulers())]


class SDImageInvocation(BaseModel):
    """Helper class to provide all Stable Diffusion raster image invocations with additional config"""

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["stable-diffusion", "image"],
                "type_hints": {
                    "model": "model",
                },
            },
        }


# Text to image
class TextToImageInvocation(BaseInvocation, SDImageInvocation):
    """Generates an image using text2img."""

    type: Literal["txt2img"] = "txt2img"

    # Inputs
    # TODO: consider making prompt optional to enable providing prompt through a link
    # fmt: off
    prompt: Optional[str] = Field(description="The prompt to generate an image from")
    seed: int = Field(default=-1, ge=-1, le=np.iinfo(np.uint32).max, description="The seed to use (-1 for a random seed)", )
    steps: int = Field(default=10, gt=0, description="The number of steps to use to generate the image")
    width: int = Field(default=512, multiple_of=64, gt=0, description="The width of the resulting image", )
    height: int = Field(default=512, multiple_of=64, gt=0, description="The height of the resulting image", )
    cfg_scale: float = Field(default=7.5, gt=0, description="The Classifier-Free Guidance scale; higher values keep the result closer to the prompt", )
    scheduler: SAMPLER_NAME_VALUES = Field(default="k_lms", description="The scheduler to use" )
    seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", )
    model: str = Field(default="", description="The model to use (currently ignored)")
    progress_images: bool = Field(default=False, description="Whether or not to produce progress images during generation", )
    # fmt: on

    # TODO: pass this an emitter method or something? or a session for dispatching?
    def dispatch_progress(
        self,
        context: InvocationContext,
        source_node_id: str,
        intermediate_state: PipelineIntermediateState,
    ) -> None:
        stable_diffusion_step_callback(
            context=context,
            intermediate_state=intermediate_state,
            node=self.dict(),
            source_node_id=source_node_id,
        )

    def invoke(self, context: InvocationContext) -> ImageOutput:
        # Handle invalid model parameter
        model = choose_model(context.services.model_manager, self.model)

        # Get the source node id (we are invoking the prepared node)
        graph_execution_state = context.services.graph_execution_manager.get(
            context.graph_execution_state_id
        )
        source_node_id = graph_execution_state.prepared_source_mapping[self.id]

        outputs = Txt2Img(model).generate(
            prompt=self.prompt,
            step_callback=partial(self.dispatch_progress, context, source_node_id),
            **self.dict(
                exclude={"prompt"}
            ),  # Shorthand for passing all of the parameters above manually
        )
        # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
        # each time it is called. We only need the first one.
        generate_output = next(outputs)

        # Results are image and seed, unwrap for now and ignore the seed
        # TODO: pre-seed?
        # TODO: can this return multiple results? Should it?
        image_type = ImageType.RESULT
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )

        metadata = context.services.metadata.build_metadata(
            session_id=context.graph_execution_state_id, node=self
        )

        context.services.images.save(
            image_type, image_name, generate_output.image, metadata
        )
        return build_image_output(
            image_type=image_type,
            image_name=image_name,
            image=generate_output.image,
        )
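The `step_callback=partial(self.dispatch_progress, context, source_node_id)` pattern pre-binds the context and source node id, leaving a one-argument callable the diffusion loop can invoke with just the intermediate state. A self-contained sketch with stand-in values:

```python
from functools import partial

def dispatch_progress(context, source_node_id, intermediate_state):
    print(f"[{source_node_id}] step state: {intermediate_state}")

# Pre-bind everything except the per-step argument.
step_callback = partial(dispatch_progress, {"session": "demo"}, "node-1")
step_callback("step 1 of 10")  # -> [node-1] step state: step 1 of 10
```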
class ImageToImageInvocation(TextToImageInvocation):
    """Generates an image using img2img."""

    type: Literal["img2img"] = "img2img"

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image")
    strength: float = Field(
        default=0.75, gt=0, le=1, description="The strength of the original image"
    )
    fit: bool = Field(
        default=True,
        description="Whether or not the result should be fit to the aspect ratio of the input image",
    )

    def dispatch_progress(
        self,
        context: InvocationContext,
        source_node_id: str,
        intermediate_state: PipelineIntermediateState,
    ) -> None:
        stable_diffusion_step_callback(
            context=context,
            intermediate_state=intermediate_state,
            node=self.dict(),
            source_node_id=source_node_id,
        )

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = (
            None
            if self.image is None
            else context.services.images.get(
                self.image.image_type, self.image.image_name
            )
        )
        mask = None

        # Handle invalid model parameter
        model = choose_model(context.services.model_manager, self.model)

        # Get the source node id (we are invoking the prepared node)
        graph_execution_state = context.services.graph_execution_manager.get(
            context.graph_execution_state_id
        )
        source_node_id = graph_execution_state.prepared_source_mapping[self.id]

        outputs = Img2Img(model).generate(
            prompt=self.prompt,
            init_image=image,
            init_mask=mask,
            step_callback=partial(self.dispatch_progress, context, source_node_id),
            **self.dict(
                exclude={"prompt", "image", "mask"}
            ),  # Shorthand for passing all of the parameters above manually
        )

        # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
        # each time it is called. We only need the first one.
        generator_output = next(outputs)

        result_image = generator_output.image

        # Results are image and seed, unwrap for now and ignore the seed
        # TODO: pre-seed?
        # TODO: can this return multiple results? Should it?
        image_type = ImageType.RESULT
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )

        metadata = context.services.metadata.build_metadata(
            session_id=context.graph_execution_state_id, node=self
        )

        context.services.images.save(image_type, image_name, result_image, metadata)
        return build_image_output(
            image_type=image_type,
            image_name=image_name,
            image=result_image,
        )


class InpaintInvocation(ImageToImageInvocation):
    """Generates an image using inpaint."""

    type: Literal["inpaint"] = "inpaint"

    # Inputs
    mask: Union[ImageField, None] = Field(description="The mask")
    inpaint_replace: float = Field(
        default=0.0,
        ge=0.0,
        le=1.0,
        description="The amount by which to replace masked areas with latent noise",
    )

    def dispatch_progress(
        self,
        context: InvocationContext,
        source_node_id: str,
        intermediate_state: PipelineIntermediateState,
    ) -> None:
        stable_diffusion_step_callback(
            context=context,
            intermediate_state=intermediate_state,
            node=self.dict(),
            source_node_id=source_node_id,
        )

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = (
            None
            if self.image is None
            else context.services.images.get(
                self.image.image_type, self.image.image_name
            )
        )
        mask = (
            None
            if self.mask is None
            else context.services.images.get(self.mask.image_type, self.mask.image_name)
        )

        # Handle invalid model parameter
        model = choose_model(context.services.model_manager, self.model)

        # Get the source node id (we are invoking the prepared node)
        graph_execution_state = context.services.graph_execution_manager.get(
            context.graph_execution_state_id
        )
        source_node_id = graph_execution_state.prepared_source_mapping[self.id]

        outputs = Inpaint(model).generate(
            prompt=self.prompt,
            init_img=image,
            init_mask=mask,
            step_callback=partial(self.dispatch_progress, context, source_node_id),
            **self.dict(
                exclude={"prompt", "image", "mask"}
            ),  # Shorthand for passing all of the parameters above manually
        )

        # Outputs is an infinite iterator that will return a new InvokeAIGeneratorOutput object
        # each time it is called. We only need the first one.
        generator_output = next(outputs)

        result_image = generator_output.image

        # Results are image and seed, unwrap for now and ignore the seed
        # TODO: pre-seed?
        # TODO: can this return multiple results? Should it?
        image_type = ImageType.RESULT
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )

        metadata = context.services.metadata.build_metadata(
            session_id=context.graph_execution_state_id, node=self
        )

        context.services.images.save(image_type, image_name, result_image, metadata)
        return build_image_output(
            image_type=image_type,
            image_name=image_name,
            image=result_image,
        )
invokeai/app/invocations/image.py (new file, 369 lines)
@@ -0,0 +1,369 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from typing import Literal, Optional

import numpy
from PIL import Image, ImageFilter, ImageOps
from pydantic import BaseModel, Field

from ..models.image import ImageField, ImageType
from .baseinvocation import (
    BaseInvocation,
    BaseInvocationOutput,
    InvocationContext,
    InvocationConfig,
)


class PILInvocationConfig(BaseModel):
    """Helper class to provide all PIL invocations with additional config"""

    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["PIL", "image"],
            },
        }


class ImageOutput(BaseInvocationOutput):
    """Base class for invocations that output an image"""

    # fmt: off
    type: Literal["image"] = "image"
    image: ImageField = Field(default=None, description="The output image")
    width: Optional[int] = Field(default=None, description="The width of the image in pixels")
    height: Optional[int] = Field(default=None, description="The height of the image in pixels")
    # fmt: on

    class Config:
        schema_extra = {
            "required": ["type", "image", "width", "height", "mode"]
        }


def build_image_output(
    image_type: ImageType, image_name: str, image: Image.Image
) -> ImageOutput:
    """Builds an ImageOutput and its ImageField"""
    image_field = ImageField(
        image_name=image_name,
        image_type=image_type,
    )
    return ImageOutput(
        image=image_field,
        width=image.width,
        height=image.height,
        mode=image.mode,
    )


class MaskOutput(BaseInvocationOutput):
    """Base class for invocations that output a mask"""

    # fmt: off
    type: Literal["mask"] = "mask"
    mask: ImageField = Field(default=None, description="The output mask")
    # fmt: on

    class Config:
        schema_extra = {
            "required": [
                "type",
                "mask",
            ]
        }


class LoadImageInvocation(BaseInvocation):
    """Load an image and provide it as output."""

    # fmt: off
    type: Literal["load_image"] = "load_image"

    # Inputs
    image_type: ImageType = Field(description="The type of the image")
    image_name: str = Field(description="The name of the image")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(self.image_type, self.image_name)

        return build_image_output(
            image_type=self.image_type,
            image_name=self.image_name,
            image=image,
        )


class ShowImageInvocation(BaseInvocation):
    """Displays a provided image, and passes it forward in the pipeline."""

    type: Literal["show_image"] = "show_image"

    # Inputs
    image: ImageField = Field(default=None, description="The image to show")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )
        if image:
            image.show()

        # TODO: how to handle failure?

        return build_image_output(
            image_type=self.image.image_type,
            image_name=self.image.image_name,
            image=image,
        )


class CropImageInvocation(BaseInvocation, PILInvocationConfig):
    """Crops an image to a specified box. The box can be outside of the image."""

    # fmt: off
    type: Literal["crop"] = "crop"

    # Inputs
    image: ImageField = Field(default=None, description="The image to crop")
    x: int = Field(default=0, description="The left x coordinate of the crop rectangle")
    y: int = Field(default=0, description="The top y coordinate of the crop rectangle")
    width: int = Field(default=512, gt=0, description="The width of the crop rectangle")
    height: int = Field(default=512, gt=0, description="The height of the crop rectangle")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )

        image_crop = Image.new(
            mode="RGBA", size=(self.width, self.height), color=(0, 0, 0, 0)
        )
        image_crop.paste(image, (-self.x, -self.y))

        image_type = ImageType.INTERMEDIATE
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )

        metadata = context.services.metadata.build_metadata(
            session_id=context.graph_execution_state_id, node=self
        )

        context.services.images.save(image_type, image_name, image_crop, metadata)
        return build_image_output(
            image_type=image_type,
            image_name=image_name,
            image=image_crop,
        )


class PasteImageInvocation(BaseInvocation, PILInvocationConfig):
    """Pastes an image into another image."""

    # fmt: off
    type: Literal["paste"] = "paste"

    # Inputs
    base_image: ImageField = Field(default=None, description="The base image")
    image: ImageField = Field(default=None, description="The image to paste")
    mask: Optional[ImageField] = Field(default=None, description="The mask to use when pasting")
    x: int = Field(default=0, description="The left x coordinate at which to paste the image")
    y: int = Field(default=0, description="The top y coordinate at which to paste the image")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        base_image = context.services.images.get(
            self.base_image.image_type, self.base_image.image_name
        )
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )
        mask = (
            None
            if self.mask is None
            else ImageOps.invert(
                context.services.images.get(self.mask.image_type, self.mask.image_name)
            )
        )
        # TODO: probably shouldn't invert mask here... should user be required to do it?

        min_x = min(0, self.x)
        min_y = min(0, self.y)
        max_x = max(base_image.width, image.width + self.x)
        max_y = max(base_image.height, image.height + self.y)

        new_image = Image.new(
            mode="RGBA", size=(max_x - min_x, max_y - min_y), color=(0, 0, 0, 0)
        )
        new_image.paste(base_image, (abs(min_x), abs(min_y)))
        new_image.paste(image, (max(0, self.x), max(0, self.y)), mask=mask)

        image_type = ImageType.RESULT
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )

        metadata = context.services.metadata.build_metadata(
            session_id=context.graph_execution_state_id, node=self
        )

        context.services.images.save(image_type, image_name, new_image, metadata)
        return build_image_output(
            image_type=image_type,
            image_name=image_name,
            image=new_image,
        )


class MaskFromAlphaInvocation(BaseInvocation, PILInvocationConfig):
    """Extracts the alpha channel of an image as a mask."""

    # fmt: off
    type: Literal["tomask"] = "tomask"

    # Inputs
    image: ImageField = Field(default=None, description="The image to create the mask from")
    invert: bool = Field(default=False, description="Whether or not to invert the mask")
    # fmt: on

    def invoke(self, context: InvocationContext) -> MaskOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )

        image_mask = image.split()[-1]
        if self.invert:
            image_mask = ImageOps.invert(image_mask)

        image_type = ImageType.INTERMEDIATE
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )

        metadata = context.services.metadata.build_metadata(
            session_id=context.graph_execution_state_id, node=self
        )

        context.services.images.save(image_type, image_name, image_mask, metadata)
        return MaskOutput(mask=ImageField(image_type=image_type, image_name=image_name))


class BlurInvocation(BaseInvocation, PILInvocationConfig):
    """Blurs an image"""

    # fmt: off
    type: Literal["blur"] = "blur"

    # Inputs
    image: ImageField = Field(default=None, description="The image to blur")
    radius: float = Field(default=8.0, ge=0, description="The blur radius")
    blur_type: Literal["gaussian", "box"] = Field(default="gaussian", description="The type of blur")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )

        blur = (
            ImageFilter.GaussianBlur(self.radius)
            if self.blur_type == "gaussian"
            else ImageFilter.BoxBlur(self.radius)
        )
        blur_image = image.filter(blur)

        image_type = ImageType.INTERMEDIATE
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )

        metadata = context.services.metadata.build_metadata(
            session_id=context.graph_execution_state_id, node=self
        )

        context.services.images.save(image_type, image_name, blur_image, metadata)
        return build_image_output(
            image_type=image_type, image_name=image_name, image=blur_image
        )
class LerpInvocation(BaseInvocation, PILInvocationConfig):
    """Linear interpolation of all pixels of an image"""

    # fmt: off
    type: Literal["lerp"] = "lerp"

    # Inputs
    image: ImageField = Field(default=None, description="The image to lerp")
    min: int = Field(default=0, ge=0, le=255, description="The minimum output value")
    max: int = Field(default=255, ge=0, le=255, description="The maximum output value")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )

        image_arr = numpy.asarray(image, dtype=numpy.float32) / 255
        image_arr = image_arr * (self.max - self.min) + self.min

        lerp_image = Image.fromarray(numpy.uint8(image_arr))

        image_type = ImageType.INTERMEDIATE
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )

        metadata = context.services.metadata.build_metadata(
            session_id=context.graph_execution_state_id, node=self
        )

        context.services.images.save(image_type, image_name, lerp_image, metadata)
        return build_image_output(
            image_type=image_type, image_name=image_name, image=lerp_image
        )


class InverseLerpInvocation(BaseInvocation, PILInvocationConfig):
    """Inverse linear interpolation of all pixels of an image"""

    # fmt: off
    type: Literal["ilerp"] = "ilerp"

    # Inputs
    image: ImageField = Field(default=None, description="The image to lerp")
    min: int = Field(default=0, ge=0, le=255, description="The minimum input value")
    max: int = Field(default=255, ge=0, le=255, description="The maximum input value")
    # fmt: on

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )

        image_arr = numpy.asarray(image, dtype=numpy.float32)
        image_arr = (
            numpy.minimum(
                numpy.maximum(image_arr - self.min, 0) / float(self.max - self.min), 1
            )
            * 255
        )

        ilerp_image = Image.fromarray(numpy.uint8(image_arr))

        image_type = ImageType.INTERMEDIATE
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )

        metadata = context.services.metadata.build_metadata(
            session_id=context.graph_execution_state_id, node=self
        )

        context.services.images.save(image_type, image_name, ilerp_image, metadata)
        return build_image_output(
            image_type=image_type, image_name=image_name, image=ilerp_image
        )
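An arithmetic check of the lerp/ilerp pair: lerp maps the 0..255 input range onto [min, max] (hence the `+ min` offset above), and ilerp maps in-range values back to 0..255:

```python
import numpy

def lerp(arr, lo, hi):
    return (arr / 255.0) * (hi - lo) + lo

def ilerp(arr, lo, hi):
    return numpy.minimum(numpy.maximum(arr - lo, 0) / float(hi - lo), 1) * 255

x = numpy.array([0.0, 127.5, 255.0])
y = lerp(x, 64, 192)      # -> [ 64.  128.  192.]
print(ilerp(y, 64, 192))  # -> [  0.  127.5 255.], the original values
```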
invokeai/app/invocations/latent.py (new file, 371 lines)
@@ -0,0 +1,371 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)

import random
from typing import Literal, Optional
from pydantic import BaseModel, Field
import torch

from invokeai.app.invocations.util.choose_model import choose_model
from invokeai.app.util.step_callback import stable_diffusion_step_callback

from ...backend.model_management.model_manager import ModelManager
from ...backend.util.devices import choose_torch_device, torch_dtype
from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import PostprocessingSettings
from ...backend.image_util.seamless import configure_model_padding
from ...backend.prompting.conditioning import get_uc_and_c_and_ec
from ...backend.stable_diffusion.diffusers_pipeline import ConditioningData, StableDiffusionGeneratorPipeline
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig
import numpy as np
from ..services.image_storage import ImageType
from .image import ImageField, ImageOutput, build_image_output
from ...backend.stable_diffusion import PipelineIntermediateState
from diffusers.schedulers import SchedulerMixin as Scheduler
import diffusers
from diffusers import DiffusionPipeline


class LatentsField(BaseModel):
    """A latents field used for passing latents between invocations"""

    latents_name: Optional[str] = Field(default=None, description="The name of the latents")

    class Config:
        schema_extra = {"required": ["latents_name"]}

class LatentsOutput(BaseInvocationOutput):
    """Base class for invocations that output latents"""
    #fmt: off
    type: Literal["latent_output"] = "latent_output"
    latents: LatentsField = Field(default=None, description="The output latents")
    #fmt: on

class NoiseOutput(BaseInvocationOutput):
    """Invocation noise output"""
    #fmt: off
    type: Literal["noise_output"] = "noise_output"
    noise: LatentsField = Field(default=None, description="The output noise")
    #fmt: on


# TODO: this seems like a hack
scheduler_map = dict(
    ddim=diffusers.DDIMScheduler,
    dpmpp_2=diffusers.DPMSolverMultistepScheduler,
    k_dpm_2=diffusers.KDPM2DiscreteScheduler,
    k_dpm_2_a=diffusers.KDPM2AncestralDiscreteScheduler,
    k_dpmpp_2=diffusers.DPMSolverMultistepScheduler,
    k_euler=diffusers.EulerDiscreteScheduler,
    k_euler_a=diffusers.EulerAncestralDiscreteScheduler,
    k_heun=diffusers.HeunDiscreteScheduler,
    k_lms=diffusers.LMSDiscreteScheduler,
    plms=diffusers.PNDMScheduler,
)


SAMPLER_NAME_VALUES = Literal[
    tuple(list(scheduler_map.keys()))
]
def get_scheduler(scheduler_name:str, model: StableDiffusionGeneratorPipeline)->Scheduler:
|
||||||
|
scheduler_class = scheduler_map.get(scheduler_name,'ddim')
|
||||||
|
scheduler = scheduler_class.from_config(model.scheduler.config)
|
||||||
|
# hack copied over from generate.py
|
||||||
|
if not hasattr(scheduler, 'uses_inpainting_model'):
|
||||||
|
scheduler.uses_inpainting_model = lambda: False
|
||||||
|
return scheduler
|
||||||
|
|
||||||
|
|
||||||
|
def get_noise(width:int, height:int, device:torch.device, seed:int = 0, latent_channels:int=4, use_mps_noise:bool=False, downsampling_factor:int = 8):
|
||||||
|
# limit noise to only the diffusion image channels, not the mask channels
|
||||||
|
input_channels = min(latent_channels, 4)
|
||||||
|
use_device = "cpu" if (use_mps_noise or device.type == "mps") else device
|
||||||
|
generator = torch.Generator(device=use_device).manual_seed(seed)
|
||||||
|
x = torch.randn(
|
||||||
|
[
|
||||||
|
1,
|
||||||
|
input_channels,
|
||||||
|
height // downsampling_factor,
|
||||||
|
width // downsampling_factor,
|
||||||
|
],
|
||||||
|
dtype=torch_dtype(device),
|
||||||
|
device=use_device,
|
||||||
|
generator=generator,
|
||||||
|
).to(device)
|
||||||
|
# if self.perlin > 0.0:
|
||||||
|
# perlin_noise = self.get_perlin_noise(
|
||||||
|
# width // self.downsampling_factor, height // self.downsampling_factor
|
||||||
|
# )
|
||||||
|
# x = (1 - self.perlin) * x + self.perlin * perlin_noise
|
||||||
|
return x
|
||||||
|
|
||||||
|
|
||||||
|
def random_seed():
|
||||||
|
return random.randint(0, np.iinfo(np.uint32).max)
|
||||||
|
|
||||||
|
|
||||||
|
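As a sanity check on get_noise above (a standalone sketch that reproduces only the shape arithmetic, without the device and dtype plumbing): a 512x512 request with the default downsampling factor of 8 and 4 latent channels yields a (1, 4, 64, 64) tensor.

import torch

width, height, downsampling_factor, latent_channels = 512, 512, 8, 4
x = torch.randn(
    [1, latent_channels, height // downsampling_factor, width // downsampling_factor],
    generator=torch.Generator().manual_seed(0),
)
print(x.shape)  # torch.Size([1, 4, 64, 64])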
class NoiseInvocation(BaseInvocation):
    """Generates latent noise."""

    type: Literal["noise"] = "noise"

    # Inputs
    seed: int = Field(ge=0, le=np.iinfo(np.uint32).max, description="The seed to use", default_factory=random_seed)
    width: int = Field(default=512, multiple_of=64, gt=0, description="The width of the resulting noise", )
    height: int = Field(default=512, multiple_of=64, gt=0, description="The height of the resulting noise", )

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["latents", "noise"],
            },
        }

    def invoke(self, context: InvocationContext) -> NoiseOutput:
        device = torch.device(choose_torch_device())
        noise = get_noise(self.width, self.height, device, self.seed)

        name = f'{context.graph_execution_state_id}__{self.id}'
        context.services.latents.set(name, noise)
        return NoiseOutput(
            noise=LatentsField(latents_name=name)
        )


# Text to image
class TextToLatentsInvocation(BaseInvocation):
    """Generates latents from a prompt."""

    type: Literal["t2l"] = "t2l"

    # Inputs
    # TODO: consider making prompt optional to enable providing prompt through a link
    # fmt: off
    prompt: Optional[str] = Field(description="The prompt to generate an image from")
    seed: int = Field(default=-1, ge=-1, le=np.iinfo(np.uint32).max, description="The seed to use (-1 for a random seed)", )
    noise: Optional[LatentsField] = Field(description="The noise to use")
    steps: int = Field(default=10, gt=0, description="The number of steps to use to generate the image")
    width: int = Field(default=512, multiple_of=64, gt=0, description="The width of the resulting image", )
    height: int = Field(default=512, multiple_of=64, gt=0, description="The height of the resulting image", )
    cfg_scale: float = Field(default=7.5, gt=0, description="The Classifier-Free Guidance, higher values may result in a result closer to the prompt", )
    scheduler: SAMPLER_NAME_VALUES = Field(default="k_lms", description="The scheduler to use" )
    seamless: bool = Field(default=False, description="Whether or not to generate an image that can tile without seams", )
    seamless_axes: str = Field(default="", description="The axes to tile the image on, 'x' and/or 'y'")
    model: str = Field(default="", description="The model to use (currently ignored)")
    progress_images: bool = Field(default=False, description="Whether or not to produce progress images during generation", )
    # fmt: on

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["latents", "image"],
                "type_hints": {
                    "model": "model"
                }
            },
        }

    # TODO: pass this an emitter method or something? or a session for dispatching?
    def dispatch_progress(
        self, context: InvocationContext, source_node_id: str, intermediate_state: PipelineIntermediateState
    ) -> None:
        stable_diffusion_step_callback(
            context=context,
            intermediate_state=intermediate_state,
            node=self.dict(),
            source_node_id=source_node_id,
        )

    def get_model(self, model_manager: ModelManager) -> StableDiffusionGeneratorPipeline:
        model_info = choose_model(model_manager, self.model)
        model_name = model_info['model_name']
        model_hash = model_info['hash']
        model: StableDiffusionGeneratorPipeline = model_info['model']
        model.scheduler = get_scheduler(
            model=model,
            scheduler_name=self.scheduler
        )

        if isinstance(model, DiffusionPipeline):
            for component in [model.unet, model.vae]:
                configure_model_padding(
                    component,
                    self.seamless,
                    self.seamless_axes,
                )
        else:
            configure_model_padding(
                model,
                self.seamless,
                self.seamless_axes,
            )

        return model

    def get_conditioning_data(self, model: StableDiffusionGeneratorPipeline) -> ConditioningData:
        uc, c, extra_conditioning_info = get_uc_and_c_and_ec(self.prompt, model=model)
        conditioning_data = ConditioningData(
            uc,
            c,
            self.cfg_scale,
            extra_conditioning_info,
            postprocessing_settings=PostprocessingSettings(
                threshold=0.0,  # threshold
                warmup=0.2,  # warmup
                h_symmetry_time_pct=None,  # h_symmetry_time_pct
                v_symmetry_time_pct=None,  # v_symmetry_time_pct
            ),
        ).add_scheduler_args_if_applicable(model.scheduler, eta=None)  # ddim_eta
        return conditioning_data

    def invoke(self, context: InvocationContext) -> LatentsOutput:
        noise = context.services.latents.get(self.noise.latents_name)

        # Get the source node id (we are invoking the prepared node)
        graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
        source_node_id = graph_execution_state.prepared_source_mapping[self.id]

        def step_callback(state: PipelineIntermediateState):
            self.dispatch_progress(context, source_node_id, state)

        model = self.get_model(context.services.model_manager)
        conditioning_data = self.get_conditioning_data(model)

        # TODO: Verify the noise is the right size

        result_latents, result_attention_map_saver = model.latents_from_embeddings(
            latents=torch.zeros_like(noise, dtype=torch_dtype(model.device)),
            noise=noise,
            num_inference_steps=self.steps,
            conditioning_data=conditioning_data,
            callback=step_callback,
        )

        # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
        torch.cuda.empty_cache()

        name = f'{context.graph_execution_state_id}__{self.id}'
        context.services.latents.set(name, result_latents)
        return LatentsOutput(
            latents=LatentsField(latents_name=name)
        )


class LatentsToLatentsInvocation(TextToLatentsInvocation):
    """Generates latents using latents as base image."""

    type: Literal["l2l"] = "l2l"

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["latents"],
                "type_hints": {
                    "model": "model"
                }
            },
        }

    # Inputs
    latents: Optional[LatentsField] = Field(description="The latents to use as a base image")
    strength: float = Field(default=0.5, description="The strength of the latents to use")

    def invoke(self, context: InvocationContext) -> LatentsOutput:
        noise = context.services.latents.get(self.noise.latents_name)
        latent = context.services.latents.get(self.latents.latents_name)

        # Get the source node id (we are invoking the prepared node)
        graph_execution_state = context.services.graph_execution_manager.get(context.graph_execution_state_id)
        source_node_id = graph_execution_state.prepared_source_mapping[self.id]

        def step_callback(state: PipelineIntermediateState):
            self.dispatch_progress(context, source_node_id, state)

        model = self.get_model(context.services.model_manager)
        conditioning_data = self.get_conditioning_data(model)

        # TODO: Verify the noise is the right size

        initial_latents = latent if self.strength < 1.0 else torch.zeros_like(
            latent, device=model.device, dtype=latent.dtype
        )

        timesteps, _ = model.get_img2img_timesteps(
            self.steps,
            self.strength,
            device=model.device,
        )

        result_latents, result_attention_map_saver = model.latents_from_embeddings(
            latents=initial_latents,
            timesteps=timesteps,
            noise=noise,
            num_inference_steps=self.steps,
            conditioning_data=conditioning_data,
            callback=step_callback,
        )

        # https://discuss.huggingface.co/t/memory-usage-by-later-pipeline-stages/23699
        torch.cuda.empty_cache()

        name = f'{context.graph_execution_state_id}__{self.id}'
        context.services.latents.set(name, result_latents)
        return LatentsOutput(
            latents=LatentsField(latents_name=name)
        )
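The strength handling just above is worth a concrete look (a standalone torch sketch with hypothetical values, not part of the diff): at strength below 1.0 the stored latents seed the denoise, while strength of exactly 1.0 discards them and starts from zeros, which makes l2l behave like t2l.

import torch

latent = torch.randn(1, 4, 64, 64)

for strength in (0.5, 1.0):
    initial_latents = latent if strength < 1.0 else torch.zeros_like(latent)
    print(strength, initial_latents.abs().sum().item() == 0.0)
# 0.5 False  -> base latents are kept
# 1.0 True   -> start from zeros, as in text-to-image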
# Latent to image
class LatentsToImageInvocation(BaseInvocation):
    """Generates an image from latents."""

    type: Literal["l2i"] = "l2i"

    # Inputs
    latents: Optional[LatentsField] = Field(description="The latents to generate an image from")
    model: str = Field(default="", description="The model to use")

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["latents", "image"],
                "type_hints": {
                    "model": "model"
                }
            },
        }

    @torch.no_grad()
    def invoke(self, context: InvocationContext) -> ImageOutput:
        latents = context.services.latents.get(self.latents.latents_name)

        # TODO: this only really needs the vae
        model_info = choose_model(context.services.model_manager, self.model)
        model: StableDiffusionGeneratorPipeline = model_info['model']

        with torch.inference_mode():
            np_image = model.decode_latents(latents)
            image = model.numpy_to_pil(np_image)[0]

        image_type = ImageType.RESULT
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )

        metadata = context.services.metadata.build_metadata(
            session_id=context.graph_execution_state_id, node=self
        )

        context.services.images.save(image_type, image_name, image, metadata)
        return build_image_output(
            image_type=image_type,
            image_name=image_name,
            image=image
        )
75
invokeai/app/invocations/math.py
Normal file
@@ -0,0 +1,75 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)

from typing import Literal

from pydantic import BaseModel, Field

from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext, InvocationConfig


class MathInvocationConfig(BaseModel):
    """Helper class to provide all math invocations with additional config"""

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["math"],
            }
        }


class IntOutput(BaseInvocationOutput):
    """An integer output"""
    #fmt: off
    type: Literal["int_output"] = "int_output"
    a: int = Field(default=None, description="The output integer")
    #fmt: on


class AddInvocation(BaseInvocation, MathInvocationConfig):
    """Adds two numbers"""
    #fmt: off
    type: Literal["add"] = "add"
    a: int = Field(default=0, description="The first number")
    b: int = Field(default=0, description="The second number")
    #fmt: on

    def invoke(self, context: InvocationContext) -> IntOutput:
        return IntOutput(a=self.a + self.b)


class SubtractInvocation(BaseInvocation, MathInvocationConfig):
    """Subtracts two numbers"""
    #fmt: off
    type: Literal["sub"] = "sub"
    a: int = Field(default=0, description="The first number")
    b: int = Field(default=0, description="The second number")
    #fmt: on

    def invoke(self, context: InvocationContext) -> IntOutput:
        return IntOutput(a=self.a - self.b)


class MultiplyInvocation(BaseInvocation, MathInvocationConfig):
    """Multiplies two numbers"""
    #fmt: off
    type: Literal["mul"] = "mul"
    a: int = Field(default=0, description="The first number")
    b: int = Field(default=0, description="The second number")
    #fmt: on

    def invoke(self, context: InvocationContext) -> IntOutput:
        return IntOutput(a=self.a * self.b)


class DivideInvocation(BaseInvocation, MathInvocationConfig):
    """Divides two numbers"""
    #fmt: off
    type: Literal["div"] = "div"
    a: int = Field(default=0, description="The first number")
    b: int = Field(default=0, description="The second number")
    #fmt: on

    def invoke(self, context: InvocationContext) -> IntOutput:
        return IntOutput(a=int(self.a / self.b))
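For illustration, the arithmetic nodes can be exercised directly (a hedged sketch: these invoke() implementations never touch their context argument, so None stands in for a real InvocationContext, and the id values are hypothetical):

add = AddInvocation(id="1", a=2, b=3)
mul = MultiplyInvocation(id="2", a=add.invoke(None).a, b=4)
print(mul.invoke(None).a)  # (2 + 3) * 4 = 20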
18
invokeai/app/invocations/params.py
Normal file
@@ -0,0 +1,18 @@
# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654)

from typing import Literal
from pydantic import Field
from .baseinvocation import BaseInvocation, BaseInvocationOutput, InvocationContext
from .math import IntOutput

# Pass-through parameter nodes - used by subgraphs

class ParamIntInvocation(BaseInvocation):
    """An integer parameter"""
    #fmt: off
    type: Literal["param_int"] = "param_int"
    a: int = Field(default=0, description="The integer value")
    #fmt: on

    def invoke(self, context: InvocationContext) -> IntOutput:
        return IntOutput(a=self.a)
22
invokeai/app/invocations/prompt.py
Normal file
@@ -0,0 +1,22 @@
from typing import Literal

from pydantic.fields import Field

from .baseinvocation import BaseInvocationOutput


class PromptOutput(BaseInvocationOutput):
    """Base class for invocations that output a prompt"""
    #fmt: off
    type: Literal["prompt"] = "prompt"

    prompt: str = Field(default=None, description="The output prompt")
    #fmt: on

    class Config:
        schema_extra = {
            'required': [
                'type',
                'prompt',
            ]
        }
56
invokeai/app/invocations/reconstruct.py
Normal file
@@ -0,0 +1,56 @@
from typing import Literal, Union

from pydantic import Field

from invokeai.app.models.image import ImageField, ImageType

from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
from .image import ImageOutput, build_image_output


class RestoreFaceInvocation(BaseInvocation):
    """Restores faces in an image."""
    #fmt: off
    type: Literal["restore_face"] = "restore_face"

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image")
    strength: float = Field(default=0.75, gt=0, le=1, description="The strength of the restoration" )
    #fmt: on

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["restoration", "image"],
            },
        }

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )
        results = context.services.restoration.upscale_and_reconstruct(
            image_list=[[image, 0]],
            upscale=None,
            strength=self.strength,  # GFPGAN strength
            save_original=False,
            image_callback=None,
        )

        # Results are image and seed, unwrap for now
        # TODO: can this return multiple results?
        image_type = ImageType.RESULT
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )

        metadata = context.services.metadata.build_metadata(
            session_id=context.graph_execution_state_id, node=self
        )

        context.services.images.save(image_type, image_name, results[0][0], metadata)
        return build_image_output(
            image_type=image_type,
            image_name=image_name,
            image=results[0][0]
        )
60
invokeai/app/invocations/upscale.py
Normal file
@@ -0,0 +1,60 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from typing import Literal, Union

from pydantic import Field

from invokeai.app.models.image import ImageField, ImageType
from .baseinvocation import BaseInvocation, InvocationContext, InvocationConfig
from .image import ImageOutput, build_image_output


class UpscaleInvocation(BaseInvocation):
    """Upscales an image."""
    #fmt: off
    type: Literal["upscale"] = "upscale"

    # Inputs
    image: Union[ImageField, None] = Field(description="The input image", default=None)
    strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
    level: Literal[2, 4] = Field(default=2, description="The upscale level")
    #fmt: on

    # Schema customisation
    class Config(InvocationConfig):
        schema_extra = {
            "ui": {
                "tags": ["upscaling", "image"],
            },
        }

    def invoke(self, context: InvocationContext) -> ImageOutput:
        image = context.services.images.get(
            self.image.image_type, self.image.image_name
        )
        results = context.services.restoration.upscale_and_reconstruct(
            image_list=[[image, 0]],
            upscale=(self.level, self.strength),
            strength=0.0,  # GFPGAN strength
            save_original=False,
            image_callback=None,
        )

        # Results are image and seed, unwrap for now
        # TODO: can this return multiple results?
        image_type = ImageType.RESULT
        image_name = context.services.images.create_name(
            context.graph_execution_state_id, self.id
        )

        metadata = context.services.metadata.build_metadata(
            session_id=context.graph_execution_state_id, node=self
        )

        context.services.images.save(image_type, image_name, results[0][0], metadata)
        return build_image_output(
            image_type=image_type,
            image_name=image_name,
            image=results[0][0]
        )
14
invokeai/app/invocations/util/choose_model.py
Normal file
@@ -0,0 +1,14 @@
from invokeai.backend.model_management.model_manager import ModelManager


def choose_model(model_manager: ModelManager, model_name: str):
    """Returns the selected model if `model_name` is valid, otherwise falls back to the default model."""
    if model_manager.valid_model(model_name):
        model = model_manager.get_model(model_name)
    else:
        model = model_manager.get_model()
        print(
            f"* Warning: '{model_name}' is not a valid model name. Using default model '{model['model_name']}' instead."
        )

    return model
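A minimal sketch of the fallback behavior, assuming a hypothetical stub in place of a real ModelManager (valid_model and get_model are the only methods choose_model touches):

class StubModelManager:
    """Hypothetical stand-in exposing the two methods choose_model uses."""
    def valid_model(self, name):
        return name == "stable-diffusion-1.5"
    def get_model(self, name="stable-diffusion-1.5"):
        return {"model_name": name, "hash": "deadbeef", "model": None}

mm = StubModelManager()
print(choose_model(mm, "stable-diffusion-1.5")["model_name"])  # the selected model
print(choose_model(mm, "not-a-model")["model_name"])           # the default, with a warning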
3
invokeai/app/models/exceptions.py
Normal file
@@ -0,0 +1,3 @@
class CanceledException(Exception):
    """Execution canceled by user."""
    pass
29
invokeai/app/models/image.py
Normal file
@@ -0,0 +1,29 @@
from enum import Enum
from typing import Optional
from pydantic import BaseModel, Field


class ImageType(str, Enum):
    RESULT = "results"
    INTERMEDIATE = "intermediates"
    UPLOAD = "uploads"


def is_image_type(obj):
    try:
        ImageType(obj)
    except ValueError:
        return False
    return True


class ImageField(BaseModel):
    """An image field used for passing image objects between invocations"""

    image_type: ImageType = Field(
        default=ImageType.RESULT, description="The type of the image"
    )
    image_name: Optional[str] = Field(default=None, description="The name of the image")

    class Config:
        schema_extra = {"required": ["image_type", "image_name"]}
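Because ImageType is a str-valued enum, members compare equal to their raw strings, and is_image_type relies on the enum constructor rejecting anything else. A quick check (assuming the definitions above):

print(ImageType.RESULT == "results")    # True
print(is_image_type("intermediates"))   # True
print(is_image_type("banana"))          # False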
56
invokeai/app/services/default_graphs.py
Normal file
@@ -0,0 +1,56 @@
from ..invocations.latent import LatentsToImageInvocation, NoiseInvocation, TextToLatentsInvocation
from ..invocations.params import ParamIntInvocation
from .graph import Edge, EdgeConnection, ExposedNodeInput, ExposedNodeOutput, Graph, LibraryGraph
from .item_storage import ItemStorageABC


default_text_to_image_graph_id = '539b2af5-2b4d-4d8c-8071-e54a3255fc74'


def create_text_to_image() -> LibraryGraph:
    return LibraryGraph(
        id=default_text_to_image_graph_id,
        name='t2i',
        description='Converts text to an image',
        graph=Graph(
            nodes={
                'width': ParamIntInvocation(id='width', a=512),
                'height': ParamIntInvocation(id='height', a=512),
                '3': NoiseInvocation(id='3'),
                '4': TextToLatentsInvocation(id='4'),
                '5': LatentsToImageInvocation(id='5')
            },
            edges=[
                Edge(source=EdgeConnection(node_id='width', field='a'), destination=EdgeConnection(node_id='3', field='width')),
                Edge(source=EdgeConnection(node_id='height', field='a'), destination=EdgeConnection(node_id='3', field='height')),
                Edge(source=EdgeConnection(node_id='width', field='a'), destination=EdgeConnection(node_id='4', field='width')),
                Edge(source=EdgeConnection(node_id='height', field='a'), destination=EdgeConnection(node_id='4', field='height')),
                Edge(source=EdgeConnection(node_id='3', field='noise'), destination=EdgeConnection(node_id='4', field='noise')),
                Edge(source=EdgeConnection(node_id='4', field='latents'), destination=EdgeConnection(node_id='5', field='latents')),
            ]
        ),
        exposed_inputs=[
            ExposedNodeInput(node_path='4', field='prompt', alias='prompt'),
            ExposedNodeInput(node_path='width', field='a', alias='width'),
            ExposedNodeInput(node_path='height', field='a', alias='height')
        ],
        exposed_outputs=[
            ExposedNodeOutput(node_path='5', field='image', alias='image')
        ])


def create_system_graphs(graph_library: ItemStorageABC[LibraryGraph]) -> list[LibraryGraph]:
    """Creates the default system graphs, or adds new versions if the old ones don't match"""

    graphs: list[LibraryGraph] = list()

    text_to_image = graph_library.get(default_text_to_image_graph_id)

    # TODO: Check if the graph is the same as the default one, and if not, update it
    #if text_to_image is None:
    text_to_image = create_text_to_image()
    graph_library.set(text_to_image)

    graphs.append(text_to_image)

    return graphs
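To make the wiring above concrete (a sketch that assumes the invokeai package from this diff is importable): the exposed aliases give callers stable names for the otherwise numeric node paths.

g = create_text_to_image()
for inp in g.exposed_inputs:
    print(f"{inp.alias} -> node {inp.node_path}.{inp.field}")
# Expected output, reading off the definition above:
# prompt -> node 4.prompt
# width -> node width.a
# height -> node height.a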
103
invokeai/app/services/events.py
Normal file
@@ -0,0 +1,103 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from typing import Any
from invokeai.app.api.models.images import ProgressImage
from invokeai.app.util.misc import get_timestamp


class EventServiceBase:
    """Basic event bus, to have an empty stand-in when not needed"""

    session_event: str = "session_event"

    def dispatch(self, event_name: str, payload: Any) -> None:
        pass

    def __emit_session_event(self, event_name: str, payload: dict) -> None:
        payload["timestamp"] = get_timestamp()
        self.dispatch(
            event_name=EventServiceBase.session_event,
            payload=dict(event=event_name, data=payload),
        )

    # Define events here for every event in the system.
    # This will make them easier to integrate until we find a schema generator.
    def emit_generator_progress(
        self,
        graph_execution_state_id: str,
        node: dict,
        source_node_id: str,
        progress_image: ProgressImage | None,
        step: int,
        total_steps: int,
    ) -> None:
        """Emitted when there is generation progress"""
        self.__emit_session_event(
            event_name="generator_progress",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                node=node,
                source_node_id=source_node_id,
                progress_image=progress_image.dict() if progress_image is not None else None,
                step=step,
                total_steps=total_steps,
            ),
        )

    def emit_invocation_complete(
        self,
        graph_execution_state_id: str,
        result: dict,
        node: dict,
        source_node_id: str,
    ) -> None:
        """Emitted when an invocation has completed"""
        self.__emit_session_event(
            event_name="invocation_complete",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                node=node,
                source_node_id=source_node_id,
                result=result,
            ),
        )

    def emit_invocation_error(
        self,
        graph_execution_state_id: str,
        node: dict,
        source_node_id: str,
        error: str,
    ) -> None:
        """Emitted when an invocation raises an error"""
        self.__emit_session_event(
            event_name="invocation_error",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                node=node,
                source_node_id=source_node_id,
                error=error,
            ),
        )

    def emit_invocation_started(
        self, graph_execution_state_id: str, node: dict, source_node_id: str
    ) -> None:
        """Emitted when an invocation has started"""
        self.__emit_session_event(
            event_name="invocation_started",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
                node=node,
                source_node_id=source_node_id,
            ),
        )

    def emit_graph_execution_complete(self, graph_execution_state_id: str) -> None:
        """Emitted when a session has completed all invocations"""
        self.__emit_session_event(
            event_name="graph_execution_state_complete",
            payload=dict(
                graph_execution_state_id=graph_execution_state_id,
            ),
        )
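dispatch is the single extension point here: the base class is a no-op stand-in, and a real bus only needs to override it. A hedged sketch (hypothetical subclass, not part of the diff) that simply logs every session event:

class LoggingEventService(EventServiceBase):
    """Hypothetical event bus that prints instead of pushing to a socket."""
    def dispatch(self, event_name: str, payload: Any) -> None:
        print(f"[{event_name}] {payload['event']}: {payload['data']}")

events = LoggingEventService()
events.emit_graph_execution_complete("some-session-id")
# [session_event] graph_execution_state_complete: {'graph_execution_state_id': 'some-session-id', 'timestamp': ...}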
1194
invokeai/app/services/graph.py
Normal file
238
invokeai/app/services/image_storage.py
Normal file
@@ -0,0 +1,238 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

import os
from glob import glob
from abc import ABC, abstractmethod
from pathlib import Path
from queue import Queue
from typing import Dict, List, Tuple

from PIL.Image import Image
import PIL.Image as PILImage
from invokeai.app.api.models.images import ImageResponse, ImageResponseMetadata
from invokeai.app.models.image import ImageType
from invokeai.app.services.metadata import (
    InvokeAIMetadata,
    MetadataServiceBase,
    build_invokeai_metadata_pnginfo,
)
from invokeai.app.services.item_storage import PaginatedResults
from invokeai.app.util.misc import get_timestamp
from invokeai.app.util.thumbnails import get_thumbnail_name, make_thumbnail


class ImageStorageBase(ABC):
    """Responsible for storing and retrieving images."""

    @abstractmethod
    def get(self, image_type: ImageType, image_name: str) -> Image:
        """Retrieves an image as PIL Image."""
        pass

    @abstractmethod
    def list(
        self, image_type: ImageType, page: int = 0, per_page: int = 10
    ) -> PaginatedResults[ImageResponse]:
        """Gets a paginated list of images."""
        pass

    # TODO: make this a bit more flexible for e.g. cloud storage
    @abstractmethod
    def get_path(
        self, image_type: ImageType, image_name: str, is_thumbnail: bool = False
    ) -> str:
        """Gets the path to an image or its thumbnail."""
        pass

    # TODO: make this a bit more flexible for e.g. cloud storage
    @abstractmethod
    def validate_path(self, path: str) -> bool:
        """Validates an image path."""
        pass

    @abstractmethod
    def save(
        self,
        image_type: ImageType,
        image_name: str,
        image: Image,
        metadata: InvokeAIMetadata | None = None,
    ) -> Tuple[str, str, int]:
        """Saves an image and a 256x256 WEBP thumbnail. Returns a tuple of the image path, thumbnail path, and created timestamp."""
        pass

    @abstractmethod
    def delete(self, image_type: ImageType, image_name: str) -> None:
        """Deletes an image and its thumbnail (if one exists)."""
        pass

    def create_name(self, context_id: str, node_id: str) -> str:
        """Creates a unique contextual image filename."""
        return f"{context_id}_{node_id}_{str(get_timestamp())}.png"


class DiskImageStorage(ImageStorageBase):
    """Stores images on disk"""

    __output_folder: str
    __cache_ids: Queue  # TODO: this is an incredibly naive cache
    __cache: Dict[str, Image]
    __max_cache_size: int
    __metadata_service: MetadataServiceBase

    def __init__(self, output_folder: str, metadata_service: MetadataServiceBase):
        self.__output_folder = output_folder
        self.__cache = dict()
        self.__cache_ids = Queue()
        self.__max_cache_size = 10  # TODO: get this from config
        self.__metadata_service = metadata_service

        Path(output_folder).mkdir(parents=True, exist_ok=True)

        # TODO: don't hard-code. get/save/delete should maybe take subpath?
        for image_type in ImageType:
            Path(os.path.join(output_folder, image_type)).mkdir(
                parents=True, exist_ok=True
            )
            Path(os.path.join(output_folder, image_type, "thumbnails")).mkdir(
                parents=True, exist_ok=True
            )

    def list(
        self, image_type: ImageType, page: int = 0, per_page: int = 10
    ) -> PaginatedResults[ImageResponse]:
        dir_path = os.path.join(self.__output_folder, image_type)
        image_paths = glob(f"{dir_path}/*.png")
        count = len(image_paths)

        sorted_image_paths = sorted(
            glob(f"{dir_path}/*.png"), key=os.path.getctime, reverse=True
        )

        page_of_image_paths = sorted_image_paths[
            page * per_page : (page + 1) * per_page
        ]

        page_of_images: List[ImageResponse] = []

        for path in page_of_image_paths:
            filename = os.path.basename(path)
            img = PILImage.open(path)

            invokeai_metadata = self.__metadata_service.get_metadata(img)

            page_of_images.append(
                ImageResponse(
                    image_type=image_type.value,
                    image_name=filename,
                    # TODO: DiskImageStorage should not be building URLs...?
                    image_url=f"api/v1/images/{image_type.value}/{filename}",
                    thumbnail_url=f"api/v1/images/{image_type.value}/thumbnails/{os.path.splitext(filename)[0]}.webp",
                    # TODO: Creation of this object should happen elsewhere (?), just making it fit here so it works
                    metadata=ImageResponseMetadata(
                        created=int(os.path.getctime(path)),
                        width=img.width,
                        height=img.height,
                        invokeai=invokeai_metadata,
                    ),
                )
            )

        page_count_trunc = int(count / per_page)
        page_count_mod = count % per_page
        page_count = page_count_trunc if page_count_mod == 0 else page_count_trunc + 1

        return PaginatedResults[ImageResponse](
            items=page_of_images,
            page=page,
            pages=page_count,
            per_page=per_page,
            total=count,
        )

    def get(self, image_type: ImageType, image_name: str) -> Image:
        image_path = self.get_path(image_type, image_name)
        cache_item = self.__get_cache(image_path)
        if cache_item:
            return cache_item

        image = PILImage.open(image_path)
        self.__set_cache(image_path, image)
        return image

    # TODO: make this a bit more flexible for e.g. cloud storage
    def get_path(
        self, image_type: ImageType, image_name: str, is_thumbnail: bool = False
    ) -> str:
        # strip out any relative path shenanigans
        basename = os.path.basename(image_name)

        if is_thumbnail:
            path = os.path.join(
                self.__output_folder, image_type, "thumbnails", basename
            )
        else:
            path = os.path.join(self.__output_folder, image_type, basename)

        return path

    def validate_path(self, path: str) -> bool:
        try:
            os.stat(path)
            return True
        except Exception:
            return False

    def save(
        self,
        image_type: ImageType,
        image_name: str,
        image: Image,
        metadata: InvokeAIMetadata | None = None,
    ) -> Tuple[str, str, int]:
        image_path = self.get_path(image_type, image_name)

        # TODO: Reading the image and then saving it strips the metadata...
        if metadata:
            pnginfo = build_invokeai_metadata_pnginfo(metadata=metadata)
            image.save(image_path, "PNG", pnginfo=pnginfo)
        else:
            image.save(image_path)  # this saved image has an empty info

        thumbnail_name = get_thumbnail_name(image_name)
        thumbnail_path = self.get_path(image_type, thumbnail_name, is_thumbnail=True)
        thumbnail_image = make_thumbnail(image)
        thumbnail_image.save(thumbnail_path)

        self.__set_cache(image_path, image)
        self.__set_cache(thumbnail_path, thumbnail_image)

        return (image_path, thumbnail_path, int(os.path.getctime(image_path)))

    def delete(self, image_type: ImageType, image_name: str) -> None:
        image_path = self.get_path(image_type, image_name)
        thumbnail_path = self.get_path(image_type, image_name, True)
        if os.path.exists(image_path):
            os.remove(image_path)

        if image_path in self.__cache:
            del self.__cache[image_path]

        if os.path.exists(thumbnail_path):
            os.remove(thumbnail_path)

        if thumbnail_path in self.__cache:
            del self.__cache[thumbnail_path]

    def __get_cache(self, image_name: str) -> Image:
        return None if image_name not in self.__cache else self.__cache[image_name]

    def __set_cache(self, image_name: str, image: Image):
        if image_name not in self.__cache:
            self.__cache[image_name] = image
            self.__cache_ids.put(
                image_name
            )  # TODO: this should refresh position for LRU cache
            if len(self.__cache) > self.__max_cache_size:
                cache_id = self.__cache_ids.get()
                del self.__cache[cache_id]
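The page-count arithmetic in list() is a ceiling division written out longhand; a standalone check with hypothetical counts confirms it matches math.ceil:

import math

per_page = 10
for count in (0, 9, 10, 11):
    trunc, mod = int(count / per_page), count % per_page
    pages = trunc if mod == 0 else trunc + 1
    assert pages == math.ceil(count / per_page)
    print(count, "->", pages, "page(s)")  # 0->0, 9->1, 10->1, 11->2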
68
invokeai/app/services/invocation_queue.py
Normal file
@@ -0,0 +1,68 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

import time
from abc import ABC, abstractmethod
from queue import Queue

from pydantic import BaseModel, Field


class InvocationQueueItem(BaseModel):
    graph_execution_state_id: str = Field(description="The ID of the graph execution state")
    invocation_id: str = Field(description="The ID of the node being invoked")
    invoke_all: bool = Field(default=False)
    timestamp: float = Field(default_factory=time.time)


class InvocationQueueABC(ABC):
    """Abstract base class for all invocation queues"""

    @abstractmethod
    def get(self) -> InvocationQueueItem:
        pass

    @abstractmethod
    def put(self, item: InvocationQueueItem | None) -> None:
        pass

    @abstractmethod
    def cancel(self, graph_execution_state_id: str) -> None:
        pass

    @abstractmethod
    def is_canceled(self, graph_execution_state_id: str) -> bool:
        pass


class MemoryInvocationQueue(InvocationQueueABC):
    __queue: Queue
    __cancellations: dict[str, float]

    def __init__(self):
        self.__queue = Queue()
        self.__cancellations = dict()

    def get(self) -> InvocationQueueItem:
        item = self.__queue.get()

        while isinstance(item, InvocationQueueItem) \
            and item.graph_execution_state_id in self.__cancellations \
            and self.__cancellations[item.graph_execution_state_id] > item.timestamp:
            item = self.__queue.get()

        # Clear old items (guarded so the None shutdown sentinel doesn't raise on .timestamp)
        if isinstance(item, InvocationQueueItem):
            for graph_execution_state_id in list(self.__cancellations.keys()):
                if self.__cancellations[graph_execution_state_id] < item.timestamp:
                    del self.__cancellations[graph_execution_state_id]

        return item

    def put(self, item: InvocationQueueItem | None) -> None:
        self.__queue.put(item)

    def cancel(self, graph_execution_state_id: str) -> None:
        if graph_execution_state_id not in self.__cancellations:
            self.__cancellations[graph_execution_state_id] = time.time()

    def is_canceled(self, graph_execution_state_id: str) -> bool:
        return graph_execution_state_id in self.__cancellations
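Cancellation is timestamp-based: get() silently drops any item whose session was canceled after the item was enqueued. A small sketch assuming the classes above (session IDs are hypothetical; the sleeps just guarantee distinct timestamps):

import time

q = MemoryInvocationQueue()
q.put(InvocationQueueItem(graph_execution_state_id="A", invocation_id="1"))
time.sleep(0.01)
q.cancel("A")  # canceled strictly after the "A" item was enqueued
time.sleep(0.01)
q.put(InvocationQueueItem(graph_execution_state_id="B", invocation_id="2"))

item = q.get()  # the stale "A" item is skipped
print(item.graph_execution_state_id)  # B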
50
invokeai/app/services/invocation_services.py
Normal file
@@ -0,0 +1,50 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)
from invokeai.app.services.metadata import MetadataServiceBase
from invokeai.backend import ModelManager

from .events import EventServiceBase
from .latent_storage import LatentsStorageBase
from .image_storage import ImageStorageBase
from .restoration_services import RestorationServices
from .invocation_queue import InvocationQueueABC
from .item_storage import ItemStorageABC

class InvocationServices:
    """Services that can be used by invocations"""

    events: EventServiceBase
    latents: LatentsStorageBase
    images: ImageStorageBase
    metadata: MetadataServiceBase
    queue: InvocationQueueABC
    model_manager: ModelManager
    restoration: RestorationServices

    # NOTE: we must forward-declare any types that include invocations, since invocations can use services
    graph_library: ItemStorageABC["LibraryGraph"]
    graph_execution_manager: ItemStorageABC["GraphExecutionState"]
    processor: "InvocationProcessorABC"

    def __init__(
        self,
        model_manager: ModelManager,
        events: EventServiceBase,
        latents: LatentsStorageBase,
        images: ImageStorageBase,
        metadata: MetadataServiceBase,
        queue: InvocationQueueABC,
        graph_library: ItemStorageABC["LibraryGraph"],
        graph_execution_manager: ItemStorageABC["GraphExecutionState"],
        processor: "InvocationProcessorABC",
        restoration: RestorationServices,
    ):
        self.model_manager = model_manager
        self.events = events
        self.latents = latents
        self.images = images
        self.metadata = metadata
        self.queue = queue
        self.graph_library = graph_library
        self.graph_execution_manager = graph_execution_manager
        self.processor = processor
        self.restoration = restoration
90
invokeai/app/services/invoker.py
Normal file
@@ -0,0 +1,90 @@
# Copyright (c) 2022 Kyle Schouviller (https://github.com/kyle0654)

from abc import ABC
from threading import Event, Thread

from ..invocations.baseinvocation import InvocationContext
from .graph import Graph, GraphExecutionState
from .invocation_queue import InvocationQueueABC, InvocationQueueItem
from .invocation_services import InvocationServices
from .item_storage import ItemStorageABC


class Invoker:
    """The invoker, used to execute invocations"""

    services: InvocationServices

    def __init__(self, services: InvocationServices):
        self.services = services
        self._start()

    def invoke(
        self, graph_execution_state: GraphExecutionState, invoke_all: bool = False
    ) -> str | None:
        """Determines the next node to invoke and returns the id of the invoked node, or None if there are no nodes to execute"""

        # Get the next invocation
        invocation = graph_execution_state.next()
        if not invocation:
            return None

        # Save the execution state
        self.services.graph_execution_manager.set(graph_execution_state)

        # Queue the invocation
        self.services.queue.put(
            InvocationQueueItem(
                # session_id = session.id,
                graph_execution_state_id=graph_execution_state.id,
                invocation_id=invocation.id,
                invoke_all=invoke_all,
            )
        )

        return invocation.id

    def create_execution_state(self, graph: Graph | None = None) -> GraphExecutionState:
        """Creates a new execution state for the given graph"""
        new_state = GraphExecutionState(graph=Graph() if graph is None else graph)
        self.services.graph_execution_manager.set(new_state)
        return new_state

    def cancel(self, graph_execution_state_id: str) -> None:
        """Cancels the given execution state"""
        self.services.queue.cancel(graph_execution_state_id)

    def __start_service(self, service) -> None:
        # Call start() method on any services that have it
        start_op = getattr(service, "start", None)
        if callable(start_op):
            start_op(self)

    def __stop_service(self, service) -> None:
        # Call stop() method on any services that have it
        stop_op = getattr(service, "stop", None)
        if callable(stop_op):
            stop_op(self)

    def _start(self) -> None:
        """Starts the invoker. This is called automatically when the invoker is created."""
        for service in vars(self.services):
            self.__start_service(getattr(self.services, service))

    def stop(self) -> None:
        """Stops the invoker. A new invoker will have to be created to execute further."""
        # First stop all services
        for service in vars(self.services):
            self.__stop_service(getattr(self.services, service))

        self.services.queue.put(None)


class InvocationProcessorABC(ABC):
    pass