Compare commits
1134 Commits
v2.3.0-rc2
...
Model-Add-
Author | SHA1 | Date | |
---|---|---|---|
054e963bef | |||
afb66a7884 | |||
25ae36ceb5 | |||
3ae8daedaa | |||
b913e1e11e | |||
3c4b6d5735 | |||
e6123eac19 | |||
30ca25897e | |||
abaee6b9ed | |||
4d7c9e1ab7 | |||
cc5687f26c | |||
78e76f26f9 | |||
9a7580dedd | |||
dc2da8cff4 | |||
019a9f0329 | |||
fe5d9ad171 | |||
dbc0093b31 | |||
92e512b8b6 | |||
dc14701d20 | |||
737e0f3085 | |||
81b7ea4362 | |||
09dfde0ba1 | |||
3ba7e966b5 | |||
a1cd4834d1 | |||
a724038dc6 | |||
4221cf7731 | |||
c34ac91ff0 | |||
5fe38f7c88 | |||
bd7e515290 | |||
076fac07eb | |||
9348161600 | |||
dac3c158a5 | |||
17d8bbf330 | |||
9344687a56 | |||
cf534d735c | |||
501924bc60 | |||
d117251747 | |||
6ea61a8486 | |||
e4d903af20 | |||
2d9797da35 | |||
07ea806553 | |||
5ac0316c62 | |||
9536ba22af | |||
5503749085 | |||
9bfe2fa371 | |||
d8ce6e4426 | |||
43d2d6d98c | |||
64c233efd4 | |||
2245a4e117 | |||
9ceec40b76 | |||
0f13b90059 | |||
d91fc16ae4 | |||
bc01a96f9d | |||
85b2822f5e | |||
c33d8694bb | |||
685bd027f0 | |||
f592d620d5 | |||
2b127b73ac | |||
8855902cfe | |||
9d8ddc6a08 | |||
4ca5189e73 | |||
873597cb84 | |||
44d742f232 | |||
6e7dbf99f3 | |||
1ba1076888 | |||
cafa108f69 | |||
deeff36e16 | |||
d770b14358 | |||
20414ba4ad | |||
92721a1d45 | |||
f329fddab9 | |||
f2efde27f6 | |||
02c58f22be | |||
f751dcd245 | |||
a97107bd90 | |||
b2ce45a417 | |||
4e0b5d85ba | |||
a958ae5e29 | |||
4d50fbf8dc | |||
485f6e5954 | |||
1f6ce838ba | |||
0dc5773849 | |||
bc347f749c | |||
1b215059e7 | |||
db079a2733 | |||
26f71d3536 | |||
eb7ae2588c | |||
278c14ba2e | |||
74e83dda54 | |||
28c1fca477 | |||
1f0324102a | |||
a782ad092d | |||
eae4eb419a | |||
fb7f38f46e | |||
93d0cae455 | |||
35f6b5d562 | |||
2aefa06ef1 | |||
5906888477 | |||
f22c7d0da6 | |||
93b38707b2 | |||
6ecf53078f | |||
9c93b7cb59 | |||
7789e8319c | |||
7d7a28beb3 | |||
27a113d872 | |||
67f8f222d9 | |||
5347c12fed | |||
b194180f76 | |||
fb30b7d17a | |||
c341dcaa3d | |||
b695a2574b | |||
aa68a326c8 | |||
c2922d5991 | |||
85888030c3 | |||
7cf59c1e60 | |||
9738b0ff69 | |||
3021c78390 | |||
6eeaf8d9fb | |||
fa9afec0c2 | |||
d6862bf8c1 | |||
de01c38bbe | |||
7e811908e0 | |||
5f59f24f92 | |||
e414fcf3fb | |||
079ad8f35a | |||
a4d7e0c78e | |||
e9c2f173c5 | |||
44f489d581 | |||
cb48bbd806 | |||
0a761d7c43 | |||
a0f47aa72e | |||
f9abc6fc85 | |||
d840c597b5 | |||
3ca654d256 | |||
e0e01f6c50 | |||
d9dab1b6c7 | |||
3b2ef6e1a8 | |||
c125a3871a | |||
0996bd5acf | |||
ea77d557da | |||
1b01161ea4 | |||
2230cb9562 | |||
9e0c7c46a2 | |||
be305588d3 | |||
9f994df814 | |||
3062580006 | |||
596ba754b1 | |||
b980e563b9 | |||
7fe2606cb3 | |||
0c3b1fe3c4 | |||
c9ee2e351c | |||
e3aef20f42 | |||
60614badaf | |||
288cee9611 | |||
24aca37538 | |||
b853ceea65 | |||
3ee2798ede | |||
5c5106c14a | |||
c367b21c71 | |||
2eef6df66a | |||
300aa8d86c | |||
727f1638d7 | |||
ee6df5852a | |||
90525b1c43 | |||
bbb95dbc5b | |||
f4b7f80d59 | |||
220f7373c8 | |||
4bb5785f29 | |||
f9a7a7d161 | |||
de94c780d9 | |||
0b9230380c | |||
209a55b681 | |||
dc2f69f5d1 | |||
ad2f1b7b36 | |||
dd2d96a50f | |||
2bff28e305 | |||
d68234d879 | |||
b3babf26a5 | |||
ecca0eff31 | |||
28677f9621 | |||
caecfadf11 | |||
5cf8e3aa53 | |||
76cf2c61db | |||
b4d976f2db | |||
777d127c74 | |||
0678803803 | |||
d2fbc9f5e3 | |||
d81088dff7 | |||
1aaad9336f | |||
1f3c024d9d | |||
74a480f94e | |||
c6e8d3269c | |||
dcb5a3a740 | |||
c0ef546b02 | |||
7a78a83651 | |||
10cbf99310 | |||
b63aefcda9 | |||
6a77634b34 | |||
8ca91b1774 | |||
1c9d9e79d5 | |||
3aa1ee1218 | |||
06aa5a8120 | |||
580f9ecded | |||
270032670a | |||
4f056cdb55 | |||
c14241436b | |||
50b56d6088 | |||
8ec2ae7954 | |||
40d82b29cf | |||
0b953d98f5 | |||
8833d76709 | |||
027b316fd2 | |||
d612f11c11 | |||
250b0ab182 | |||
675dd12b6c | |||
7e76eea059 | |||
f45483e519 | |||
65047bf976 | |||
d586a82a53 | |||
28709961e9 | |||
e9f237f39d | |||
4156bfd810 | |||
fe75b95464 | |||
95954188b2 | |||
63f59201f8 | |||
370e8281b3 | |||
685df33584 | |||
4332c9c7a6 | |||
4a00f1cc74 | |||
7ff77504cb | |||
0d1854e44a | |||
fe6858f2d9 | |||
12c7db3a16 | |||
3ecdec02bf | |||
d6c24d59b0 | |||
bb3d1bb6cb | |||
14c8738a71 | |||
1a829bb998 | |||
9d339e94f2 | |||
ad7b1fa6fb | |||
42355b70c2 | |||
faa2558e2f | |||
081397737b | |||
55d36eaf4f | |||
26cd1728ac | |||
a0065da4a4 | |||
c11e823ff3 | |||
197e50a298 | |||
507e12520e | |||
2cc04de397 | |||
f4150a7829 | |||
5418bd3b24 | |||
76d5fa4694 | |||
386dda8233 | |||
8076c1697c | |||
65fc9a6e0e | |||
cde0b6ae8d | |||
b12760b976 | |||
b679a6ba37 | |||
2f5f08c35d | |||
8f48c14ed4 | |||
5d37fa6e36 | |||
f51581bd1b | |||
50ca6b6ffc | |||
63b9ec4c5e | |||
b115bc4247 | |||
dadc30f795 | |||
111d8391e2 | |||
1157b454b2 | |||
8a6473610b | |||
ea7911be89 | |||
9ee648e0c3 | |||
543682fd3b | |||
88cb63e4a1 | |||
76212d1cca | |||
a8df9e5122 | |||
2db180d909 | |||
b716fe8f06 | |||
69e2dc0404 | |||
a38b75572f | |||
e18de761b6 | |||
816ea39827 | |||
1cd4cdd0e5 | |||
768e969c90 | |||
57db66634d | |||
87789c1de8 | |||
c3c1511ec6 | |||
6b41127421 | |||
d232a439f7 | |||
c04f21e83e | |||
8762069b37 | |||
d9ebdd2684 | |||
3e4c10ef9c | |||
17eb2ca5a2 | |||
63725d7534 | |||
00f30ea457 | |||
1b2a3c7144 | |||
01a1777370 | |||
32945c7f45 | |||
b0b8846430 | |||
fdb146a43a | |||
42c1f1fc9d | |||
89a8ef86b5 | |||
f0fb767f57 | |||
4bd93464bf | |||
3d3de82ca9 | |||
c3ff9e6be8 | |||
21f79e5919 | |||
0342e25c74 | |||
91f982fb0b | |||
b9ab43a4bb | |||
6e0e48bf8a | |||
dcc8313dbf | |||
bf5831faa3 | |||
5eff035f55 | |||
7c60068388 | |||
d843fb078a | |||
41b2e4633f | |||
57144ac0cf | |||
a305b6adbf | |||
94daaa4abf | |||
901337186d | |||
7e2f64f60b | |||
126cba2324 | |||
2f9dcd7906 | |||
e537b5d8e1 | |||
e0e70c9222 | |||
1b21e5df54 | |||
4b76af37ae | |||
486c445afb | |||
4547c48013 | |||
8f21201c91 | |||
532b74a206 | |||
0b184913b9 | |||
97719e40e4 | |||
5ad3062b66 | |||
92d012a92d | |||
fc187f263e | |||
fd94f85abe | |||
4e9e1b660d | |||
d01adedff5 | |||
c247f430f7 | |||
3d6a358042 | |||
4d1dcd11de | |||
b33655b0d6 | |||
81dee04dc9 | |||
114018e3e6 | |||
ef8cf83b28 | |||
633857b0e3 | |||
214574d11f | |||
8584665ade | |||
516c56d0c5 | |||
5891b43ce2 | |||
62e75f95aa | |||
b07621e27e | |||
545d8968fd | |||
7cf2f58513 | |||
618e3e5e91 | |||
c703b60986 | |||
7c0ce5c282 | |||
82fe34b1f7 | |||
65f9aae81d | |||
2d9fac23e7 | |||
ebc4b52f41 | |||
c4e6d4b348 | |||
eab32bce6c | |||
55d2094094 | |||
a0d50a2b23 | |||
9efeb1b2ec | |||
86e2cb0428 | |||
53c2c0f91d | |||
bdc7b8b75a | |||
1bfdd54810 | |||
b4bf6c12a5 | |||
ab35c241c2 | |||
b3dccfaeb6 | |||
6477e31c1e | |||
dd4a1c998b | |||
70203e6e5a | |||
d778a7c5ca | |||
f8e59636cd | |||
2d1a0b0a05 | |||
c9b2234d90 | |||
82b224539b | |||
0b15ffb95b | |||
ce9aaab22f | |||
3f53f1186d | |||
c0aff396d2 | |||
955900507f | |||
d606abc544 | |||
44400d2a66 | |||
60a98cacef | |||
6a990565ff | |||
3f0b0f3250 | |||
1a7371ea17 | |||
850d1ee984 | |||
2c7928b163 | |||
87d1ec6a4c | |||
53c62537f7 | |||
418d93fdfd | |||
f2ce2f1778 | |||
5b6c61fc75 | |||
1d77581d96 | |||
3b921cf393 | |||
d334f7f1f6 | |||
8c9764476c | |||
b7d5a3e0b5 | |||
e0405031a7 | |||
ee24b686b3 | |||
835eb14c79 | |||
9aadf7abc1 | |||
243f9e8377 | |||
6e0c6d9cc9 | |||
a3076cf951 | |||
6696882c71 | |||
17b039e85d | |||
81539e6ab4 | |||
92304b9f8a | |||
ec1de5ae8b | |||
49198a61ef | |||
c22d529528 | |||
8c5773abc1 | |||
cd98d88fe7 | |||
34e3aa1f88 | |||
49ffb64ef3 | |||
ec14e2db35 | |||
5725fcb3e0 | |||
1447b6df96 | |||
e700da23d8 | |||
b4ed8bc47a | |||
bd85e00530 | |||
4e446130d8 | |||
4c93b514bb | |||
d078941316 | |||
230d3a496d | |||
ec2890c19b | |||
a540cc537f | |||
39c57aa358 | |||
01f8c37bd3 | |||
2d990c1f54 | |||
7fb2da8741 | |||
b7718985d5 | |||
c69fcb1c10 | |||
90cda11868 | |||
0982548e1f | |||
5cb877e096 | |||
11a29fdc4d | |||
24407048a5 | |||
a7c2333312 | |||
b5b541c747 | |||
ad6ea02c9c | |||
1a6ed85d99 | |||
a094bbd839 | |||
73dda812ea | |||
8eaf1c4033 | |||
4f44b64052 | |||
c559bf3e10 | |||
a485515bc6 | |||
2c9b29725b | |||
28612c899a | |||
88acbeaa35 | |||
46729efe95 | |||
b3d03e1146 | |||
e29c9a7d9e | |||
9b157b6532 | |||
10a1e7962b | |||
cb672d7d00 | |||
e791fb6b0b | |||
1c9001ad21 | |||
3083356cf0 | |||
179814e50a | |||
9515c07fca | |||
a45e94fde7 | |||
8b6196e0a2 | |||
ee2c0ab51b | |||
ca5f129902 | |||
cf2eca7c60 | |||
16aea1e869 | |||
75ff6cd3c3 | |||
7b7b31637c | |||
fca564c18a | |||
eb8d87e185 | |||
dbadb1d7b5 | |||
a4afb69615 | |||
8b7925edf3 | |||
168a51c5a6 | |||
3f5d8c3e44 | |||
609bb19573 | |||
d561d6d3dd | |||
7ffaa17551 | |||
97eac58a50 | |||
cedbe8fcd7 | |||
a461875abd | |||
ab018ccdfe | |||
d41dcdfc46 | |||
972aecc4c5 | |||
6b7be4e5dc | |||
9b1a7b553f | |||
7f99efc5df | |||
0a6d8b4855 | |||
5e41811fb5 | |||
5a4967582e | |||
1d0ba4a1a7 | |||
4878c7a2d5 | |||
9e5aa645a7 | |||
d01e23973e | |||
71bbd78574 | |||
fff41a7349 | |||
d5f524a156 | |||
3ab9d02883 | |||
27a2e27c3a | |||
da04b11a31 | |||
3795b40f63 | |||
9436f2e3d1 | |||
7fadd5e5c4 | |||
4c2a588e1f | |||
5f9de762ff | |||
91f7abb398 | |||
6420b81a5d | |||
b6ed5eafd6 | |||
694d5aa2e8 | |||
833079140b | |||
fd27948c36 | |||
1dfaaa2a57 | |||
bac6b50dd1 | |||
a30c91f398 | |||
17294bfa55 | |||
3fa1771cc9 | |||
f3bd386ff0 | |||
8486ce31de | |||
1d9845557f | |||
55dce6cfdd | |||
58be915446 | |||
dc9268f772 | |||
47ddc00c6a | |||
0d22fd59ed | |||
d5efd57c28 | |||
b52a92da7e | |||
b949162e7e | |||
5409991256 | |||
be1bcbc173 | |||
d6196e863d | |||
63e790b79b | |||
cf53bba99e | |||
ed4c8f6a8a | |||
aab8263c31 | |||
b21bd6f428 | |||
cb6903dfd0 | |||
cd87ca8214 | |||
58e5bf5a58 | |||
f17c7ca6f7 | |||
c3dd28cff9 | |||
db4e1e8b53 | |||
3e43c3e698 | |||
cc7733af1c | |||
2a29734a56 | |||
f2e533f7c8 | |||
078f897b67 | |||
8352ab2076 | |||
1a3d47814b | |||
e852ad0a51 | |||
136cd0e868 | |||
7afe26320a | |||
702da71515 | |||
b313cf8afd | |||
852d78d9ad | |||
5570a88858 | |||
cfd897874b | |||
1249147c57 | |||
eec5c3bbb1 | |||
ca8d9fb885 | |||
7d77fb9691 | |||
a4c0dfb33c | |||
2dded68267 | |||
172ce3dc25 | |||
6c8d4b091e | |||
7beebc3659 | |||
5461318eda | |||
d0abe13b60 | |||
aca9d74489 | |||
a0c213a158 | |||
740210fc99 | |||
ca10d0652f | |||
e1a85d8184 | |||
9d8236c59d | |||
7eafcd47a6 | |||
ded3f13a33 | |||
e5646d7241 | |||
79ac9698c1 | |||
d29f57c93d | |||
9b7cde8918 | |||
8ae71303a5 | |||
2cd7bd4a8e | |||
b813298f2a | |||
58f787f7d4 | |||
2bba543d20 | |||
d3c1b747ee | |||
b9ecf93ba3 | |||
487da8394d | |||
4c93bc56f8 | |||
727dfeae43 | |||
88d561dee7 | |||
7a379f1d4f | |||
3ad89f99d2 | |||
d76c5da514 | |||
da5b0673e7 | |||
d7180afe9d | |||
2e9c15711b | |||
e19b08b149 | |||
234d76a269 | |||
826d941068 | |||
34e449213c | |||
671c5943e4 | |||
16c24ec367 | |||
e8240855e0 | |||
a5e065048e | |||
a53c3269db | |||
8bf93d3a32 | |||
d42cc0fd1c | |||
d2553d783c | |||
10b747d22b | |||
1d567fa593 | |||
3a3dd39d3a | |||
f4b3d7dba2 | |||
de2c7fd372 | |||
b140e1c619 | |||
1308584289 | |||
2ac4778bcf | |||
6101d67dba | |||
3cd50fe3a1 | |||
e683b574d1 | |||
0decd05913 | |||
d01b7ea2d2 | |||
4fa91724d9 | |||
e3d1c64b77 | |||
17f35a7bba | |||
ab2f0a6fbf | |||
41cbf2f7c4 | |||
d5d2e1d7a3 | |||
587faa3e52 | |||
80229ab73e | |||
68b2911d2f | |||
2bf2f627e4 | |||
58676b2ce2 | |||
11f79dc1e1 | |||
2a095ddc8e | |||
dd849d2e91 | |||
8c63fac958 | |||
11a70e9764 | |||
33ce78e4a2 | |||
4f78518858 | |||
fad99ac4d2 | |||
423b592b25 | |||
8aa7d1da55 | |||
6b702c32ca | |||
767012aec0 | |||
2267057e2b | |||
b8212e4dea | |||
5b7e4a5f5d | |||
07f9fa63d0 | |||
1ae8986451 | |||
b305c240de | |||
248dc81ec3 | |||
ebe0071ed2 | |||
7a518218e5 | |||
fc14ac7faa | |||
95e2739c47 | |||
f129393a2e | |||
c55bbd1a85 | |||
ccba41cdb2 | |||
3d442bbf22 | |||
4888d0d832 | |||
47de3fb007 | |||
41bc160cb8 | |||
d0ba155c19 | |||
5f0848bf7d | |||
6551527fe2 | |||
159ce2ea08 | |||
3715570d17 | |||
65a7432b5a | |||
557e28f460 | |||
62a7f252f5 | |||
2fa14200aa | |||
0605cf94f0 | |||
d69156c616 | |||
0963bbbe78 | |||
f3351a5e47 | |||
f3f4c68acc | |||
5d617ce63d | |||
8a0d45ac5a | |||
2468ba7445 | |||
65b7d2db47 | |||
e07f1bb89c | |||
f4f813d108 | |||
6217edcb6c | |||
c5cc832304 | |||
a76038bac4 | |||
ff4942f9b4 | |||
1ccad64871 | |||
19f0022bbe | |||
ecc7b7a700 | |||
e46102124e | |||
314ed7d8f6 | |||
b1341bc611 | |||
07be605dcb | |||
fe318775c3 | |||
1bb07795d8 | |||
caf07479ec | |||
508780d07f | |||
05e67e924c | |||
fb2488314f | |||
062f58209b | |||
7cb9d6b1a6 | |||
fb721234ec | |||
92906aeb08 | |||
cab41f0538 | |||
5d0dcaf81e | |||
9591c8d4e0 | |||
bcb1fbe031 | |||
e87a2fe14b | |||
d00571b5a4 | |||
b08a514594 | |||
265ccaca4a | |||
7aa6c827f7 | |||
093174942b | |||
f299f40763 | |||
7545e38655 | |||
0bc55a0d55 | |||
d38e7170fe | |||
15a9412255 | |||
e29399e032 | |||
bc18a94d8c | |||
5d2bdd478c | |||
9cacba916b | |||
628e82fa79 | |||
fbbbba2fac | |||
9cbf9d52b4 | |||
fb35fe1a41 | |||
b60b5750af | |||
3ff40114fa | |||
71c6ae8789 | |||
d9a7536fa8 | |||
99f4417cd7 | |||
47f94bde04 | |||
197e6b95e3 | |||
8e47ca8d57 | |||
714fff39ba | |||
89239d1c54 | |||
c03d98cf46 | |||
d1ad46d6f1 | |||
6ae7560f66 | |||
e561d19206 | |||
9eed1919c2 | |||
b87f7b1129 | |||
7410a60208 | |||
7c86130a3d | |||
58a1d9aae0 | |||
24e32f6ae2 | |||
3dd7393984 | |||
f18f743d03 | |||
c660dcdfcd | |||
9e0250c0b4 | |||
08c747f1e0 | |||
04ae6fde80 | |||
b1a53c8ef0 | |||
cd64511f24 | |||
1e98e0b159 | |||
4f7af55bc3 | |||
d0e6a57e48 | |||
d28a486769 | |||
84722d92f6 | |||
8a3b5ac21d | |||
717d53a773 | |||
96926d6648 | |||
f3639de8b1 | |||
b71e675e8d | |||
d3c850104b | |||
c00155f6a4 | |||
8753070fc7 | |||
ed8f9f021d | |||
3ccc705396 | |||
11e422cf29 | |||
7f695fed39 | |||
310501cd8a | |||
106b3aea1b | |||
6e52ca3307 | |||
94c31f672f | |||
240bbb9852 | |||
8cf2ed91a9 | |||
7be5b4ca8b | |||
d589ad96aa | |||
097e41e8d2 | |||
4cf43b858d | |||
13a4666a6e | |||
9232290950 | |||
f3153d45bc | |||
d9cb6da951 | |||
17535d887f | |||
35da7f5b96 | |||
4e95a68582 | |||
9dfeb93f80 | |||
02247ffc79 | |||
48da030415 | |||
817e04bee0 | |||
e5d0b0c37d | |||
950f450665 | |||
f5d1fbd896 | |||
424cee63f1 | |||
79daf8b039 | |||
383cbca896 | |||
07c55d5e2a | |||
156151df45 | |||
03b1d71af9 | |||
da193ecd4a | |||
56fd202e21 | |||
29454a2974 | |||
c977d295f5 | |||
28eaffa188 | |||
3feff09fb3 | |||
158d1ef384 | |||
f6ad107fdd | |||
e2c392631a | |||
4a1b4d63ef | |||
83ecda977c | |||
9601febef8 | |||
0503680efa | |||
57ccec1df3 | |||
22f3634481 | |||
5590c73af2 | |||
1f76b30e54 | |||
4785a1cd05 | |||
8bd04654c7 | |||
2876c4ddec | |||
0dce3188cc | |||
106c7aa956 | |||
b04f199035 | |||
a2b992dfd1 | |||
745e253a78 | |||
2ea551d37d | |||
8d1481ca10 | |||
307e7e00c2 | |||
4bce81de26 | |||
c3ad1c8a9f | |||
05d51d7b5b | |||
09f69a4d28 | |||
a338af17c8 | |||
bc82fc0cdd | |||
418a3d6e41 | |||
fbcc52ec3d | |||
47e89f4ba1 | |||
12d15a1a3f | |||
888d3ae968 | |||
a28120abdd | |||
2aad4dab90 | |||
4493d83aea | |||
eff0fb9a69 | |||
c19107e0a8 | |||
eaf29e1751 | |||
d964374a91 | |||
9826f80d7f | |||
ec89bd19dc | |||
23aaf54f56 | |||
6d3cc25bca | |||
c9d246c4ec | |||
74406456f2 | |||
8e0cd2df18 | |||
4d4b1777db | |||
d6e5da6e37 | |||
5bb0f9bedc | |||
dec7d8b160 | |||
4ecf016ace | |||
4d74af2363 | |||
c6a2ba12e2 | |||
350b5205a3 | |||
06028e0131 | |||
c6d13e679f | |||
72357266a6 | |||
9d69843a9d | |||
0547d20b2f | |||
2af6b8fbd8 | |||
0cee72dba5 | |||
77c11a42ee | |||
bf812e6493 | |||
a3da12d867 | |||
1d62b4210f | |||
d5a3571c00 | |||
8b2ed9b8fd | |||
24792eb5da | |||
614220576f | |||
70bcbc7401 | |||
492605ac3e | |||
67f892455f | |||
ae689d1a4a | |||
10990799db | |||
c5b4397212 | |||
f62bbef9f7 | |||
6b4a06c3fc | |||
9157da8237 | |||
9c2b9af3a8 | |||
3833b28132 | |||
e3419c82e8 | |||
65f3d22649 | |||
39b0288595 | |||
13d12a0ceb | |||
b92dc8db83 | |||
b49188a39d | |||
b9c8270ee6 | |||
f0f3520bca | |||
e8f9ab82ed | |||
6ab364b16a | |||
a4dc11addc | |||
0372702eb4 | |||
aa8eeea478 | |||
e54ecc4c37 | |||
4a12c76097 | |||
be72faf78e | |||
28d44d80ed | |||
9008d9996f | |||
be2a9b78bb | |||
70003ee5b1 | |||
45a5ccba84 | |||
f80a64a0f4 | |||
511df2963b | |||
f92f62a91b | |||
3efe9899c2 | |||
bdbe4660fc | |||
8af9432f63 | |||
668d9cdb9d | |||
90f5811e59 | |||
15d21206a3 | |||
b622286f17 | |||
176add58b2 | |||
33c5f5a9c2 | |||
2b7752b72e | |||
5478d2a15e | |||
9ad76fe80c | |||
d74c4009cb | |||
ffe0e81ec9 | |||
bdf683ec41 | |||
7f41893da4 | |||
42da4f57c2 | |||
c2e11dfe83 | |||
17e1930229 | |||
bde94347d3 | |||
b1612afff4 | |||
1d10d952b2 | |||
9150f9ef3c | |||
7bc0f7cc6c | |||
c52d11b24c | |||
59486615dd | |||
f0212cd361 | |||
ee4cb5fdc9 | |||
75b919237b | |||
07a9062e1f | |||
cdb3e18b80 | |||
28a5424242 | |||
8d418af20b | |||
055badd611 | |||
944f9e98a7 | |||
fcffcf5602 | |||
f121dfe120 | |||
a7dd7b4298 | |||
d94780651c | |||
d26abd7f01 | |||
7e2b122105 | |||
8a21fc1c50 | |||
275d5040f4 | |||
1b5930dcad | |||
d5810f6270 | |||
ebc51dc535 | |||
ac6e9238f1 | |||
01eb93d664 | |||
89f69c2d94 | |||
dc6f6fcab7 | |||
6343b245ef | |||
8c80da2844 | |||
a12189e088 | |||
472c97e4e8 | |||
5baf0ae755 | |||
a56e3014a4 | |||
f3eff38f90 | |||
53d2d34b3d | |||
ede7d1a8f7 | |||
ac23a321b0 | |||
f52b233205 | |||
8242fc8bad | |||
09b6f7572b | |||
bde6e96800 | |||
13474e985b | |||
28b40bebbe | |||
1c9fd00f98 | |||
8ab66a211c | |||
bc03ff8b30 | |||
0247d63511 | |||
7604b36577 | |||
4a026bd46e | |||
6241fc19e0 | |||
25d7d71dd8 | |||
2432adb38f | |||
91acae30bf | |||
ca749b7de1 | |||
7486aa8608 | |||
0402766f4d | |||
a9ef5d1532 | |||
a485d45400 | |||
a40bdef29f | |||
fc2670b4d6 | |||
f0cd1aa736 | |||
c3807b044d | |||
b7ab025f40 | |||
633f702b39 | |||
3969637488 | |||
658ef829d4 | |||
0240656361 | |||
719a5de506 | |||
05bb9e444b | |||
0076757767 | |||
6ab03c4d08 | |||
142016827f | |||
466a82bcc2 | |||
05349f6cdc | |||
ab585aefae | |||
083ce9358b | |||
f56cf2400a | |||
5de5e659d0 | |||
fc53f6d47c | |||
2f70daef8f | |||
fc2a136eb0 | |||
ce3da40434 | |||
7933f27a72 | |||
1c197c602f | |||
90656aa7bf | |||
394b4a771e | |||
9c3f548900 | |||
5662d2daa8 | |||
fc0f966ad2 | |||
eb702a5049 | |||
1386d73302 | |||
6089f33e54 | |||
3a260cf54f | |||
9949a438f4 | |||
84c1122208 | |||
cc3d431928 | |||
c44b060a2e | |||
eff7fb89d8 | |||
cd5c112fcd | |||
563867fa99 | |||
2e230774c2 | |||
9577410be4 | |||
4ada4c9f1f | |||
9a6966924c | |||
0d62525f3d | |||
2ec864e37e | |||
9307ce3dc3 | |||
15996446e0 | |||
7a06c8fd89 | |||
4895fe8395 | |||
1e793a2dfe | |||
9c8fcaaf86 | |||
bf4344be51 | |||
f7532cdfd4 | |||
f1dd76c20b | |||
3016eeb6fb | |||
75b62d6ca8 | |||
82ae2769c8 | |||
61149abd2f | |||
eff126af6e | |||
0ca499cf96 | |||
3abf85e658 | |||
5095285854 | |||
93623a4449 | |||
0197459b02 | |||
1578bc68cc | |||
4ace397a99 | |||
d85a710211 | |||
536d534ab4 | |||
fc752a4e75 | |||
3c06d114c3 | |||
00d79c1fe3 | |||
60213893ab | |||
3b58413d9f | |||
1139884493 | |||
17e8f966d0 | |||
a42b25339f | |||
1b0731dd1a | |||
61c3886843 | |||
f76d57637e | |||
6bf73a0cf9 | |||
5145df21d9 | |||
e96ac61cb3 | |||
0e35d829c1 | |||
d08f048621 | |||
cfd453c1c7 | |||
6ca177e462 | |||
a1b1a48fb3 | |||
b5160321bf | |||
0cc2a8176e | |||
9ac81c1dc4 | |||
50191774fc | |||
fcd9b813e3 | |||
813f92a1ae | |||
0d141c1d84 | |||
2e3cd03b27 | |||
4500c8b244 | |||
d569c9dec6 | |||
01a2b8c05b | |||
b23664c794 | |||
f06fefcacc | |||
7fa3a499bb | |||
c50b64ec1d | |||
76b0bdb6f9 | |||
b0ad109886 | |||
66b312c353 | |||
fc857f9d91 | |||
d6bd0cbf61 | |||
a32f6e9ea7 | |||
b41342a779 | |||
7603c8982c | |||
d351e365d6 | |||
d453afbf6b | |||
9ae55c91cc | |||
9e46badc40 | |||
ca0f3ec0e4 | |||
4b9be6113d | |||
31964c7c4c | |||
64f9fbda2f | |||
3ece2f19f0 | |||
c38b0b906d | |||
c79678a643 | |||
2217998010 | |||
3b43f3a5a1 | |||
9fe660c515 | |||
16356d5225 | |||
b9aef33ae8 |
@ -1,18 +1,25 @@
|
|||||||
|
# use this file as a whitelist
|
||||||
*
|
*
|
||||||
!assets/caution.png
|
!invokeai
|
||||||
!backend
|
|
||||||
!frontend/dist
|
|
||||||
!ldm
|
!ldm
|
||||||
!pyproject.toml
|
!pyproject.toml
|
||||||
!README.md
|
|
||||||
!scripts
|
# ignore frontend/web but whitelist dist
|
||||||
|
invokeai/frontend/web/
|
||||||
|
!invokeai/frontend/web/dist/
|
||||||
|
|
||||||
|
# ignore invokeai/assets but whitelist invokeai/assets/web
|
||||||
|
invokeai/assets/
|
||||||
|
!invokeai/assets/web/
|
||||||
|
|
||||||
# Guard against pulling in any models that might exist in the directory tree
|
# Guard against pulling in any models that might exist in the directory tree
|
||||||
**.pt*
|
**/*.pt*
|
||||||
|
**/*.ckpt
|
||||||
|
|
||||||
# unignore configs, but only ignore the custom models.yaml, in case it exists
|
# Byte-compiled / optimized / DLL files
|
||||||
!configs
|
**/__pycache__/
|
||||||
configs/models.yaml
|
**/*.py[cod]
|
||||||
configs/models.yaml.orig
|
|
||||||
|
|
||||||
**/__pycache__
|
# Distribution / packaging
|
||||||
|
**/*.egg-info/
|
||||||
|
**/*.egg
|
||||||
|
1
.git-blame-ignore-revs
Normal file
@ -0,0 +1 @@
|
|||||||
|
b3dccfaeb636599c02effc377cdd8a87d658256c
|
41
.github/CODEOWNERS
vendored
@ -1,7 +1,34 @@
|
|||||||
ldm/invoke/pngwriter.py @CapableWeb
|
# continuous integration
|
||||||
ldm/invoke/server_legacy.py @CapableWeb
|
/.github/workflows/ @mauwii @lstein @blessedcoolant
|
||||||
scripts/legacy_api.py @CapableWeb
|
|
||||||
tests/legacy_tests.sh @CapableWeb
|
# documentation
|
||||||
installer/ @ebr
|
/docs/ @lstein @mauwii @tildebyte @blessedcoolant
|
||||||
.github/workflows/ @mauwii
|
/mkdocs.yml @lstein @mauwii @blessedcoolant
|
||||||
docker_build/ @mauwii
|
|
||||||
|
# nodes
|
||||||
|
/invokeai/app/ @Kyle0654 @blessedcoolant
|
||||||
|
|
||||||
|
# installation and configuration
|
||||||
|
/pyproject.toml @mauwii @lstein @blessedcoolant
|
||||||
|
/docker/ @mauwii @lstein @blessedcoolant
|
||||||
|
/scripts/ @ebr @lstein
|
||||||
|
/installer/ @lstein @ebr
|
||||||
|
/invokeai/assets @lstein @ebr
|
||||||
|
/invokeai/configs @lstein
|
||||||
|
/invokeai/version @lstein @blessedcoolant
|
||||||
|
|
||||||
|
# web ui
|
||||||
|
/invokeai/frontend @blessedcoolant @psychedelicious @lstein
|
||||||
|
/invokeai/backend @blessedcoolant @psychedelicious @lstein
|
||||||
|
|
||||||
|
# generation, model management, postprocessing
|
||||||
|
/invokeai/backend @keturn @damian0815 @lstein @blessedcoolant @jpphoto
|
||||||
|
|
||||||
|
# front ends
|
||||||
|
/invokeai/frontend/CLI @lstein
|
||||||
|
/invokeai/frontend/install @lstein @ebr @mauwii
|
||||||
|
/invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
|
||||||
|
/invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
|
||||||
|
/invokeai/frontend/web @psychedelicious @blessedcoolant
|
||||||
|
|
||||||
|
|
||||||
|
10
.github/ISSUE_TEMPLATE/BUG_REPORT.yml
vendored
@ -66,6 +66,16 @@ body:
|
|||||||
validations:
|
validations:
|
||||||
required: false
|
required: false
|
||||||
|
|
||||||
|
- type: input
|
||||||
|
id: version-number
|
||||||
|
attributes:
|
||||||
|
label: What version did you experience this issue on?
|
||||||
|
description: |
|
||||||
|
Please share the version of Invoke AI that you experienced the issue on. If this is not the latest version, please update first to confirm the issue still exists. If you are testing main, please include the commit hash instead.
|
||||||
|
placeholder: X.X.X
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: what-happened
|
id: what-happened
|
||||||
attributes:
|
attributes:
|
||||||
|
88
.github/workflows/build-cloud-img.yml
vendored
@ -1,88 +0,0 @@
|
|||||||
name: Build and push cloud image
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
# push:
|
|
||||||
# branches:
|
|
||||||
# - main
|
|
||||||
# tags:
|
|
||||||
# - v*
|
|
||||||
# # we will NOT push the image on pull requests, only test buildability.
|
|
||||||
# pull_request:
|
|
||||||
# branches:
|
|
||||||
# - main
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
packages: write
|
|
||||||
|
|
||||||
env:
|
|
||||||
REGISTRY: ghcr.io
|
|
||||||
IMAGE_NAME: ${{ github.repository }}
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
docker:
|
|
||||||
if: github.event.pull_request.draft == false
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
arch:
|
|
||||||
- x86_64
|
|
||||||
# requires resolving a patchmatch issue
|
|
||||||
# - aarch64
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
name: ${{ matrix.arch }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v2
|
|
||||||
if: matrix.arch == 'aarch64'
|
|
||||||
|
|
||||||
- name: Docker meta
|
|
||||||
id: meta
|
|
||||||
uses: docker/metadata-action@v4
|
|
||||||
with:
|
|
||||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
|
||||||
# see https://github.com/docker/metadata-action
|
|
||||||
# will push the following tags:
|
|
||||||
# :edge
|
|
||||||
# :main (+ any other branches enabled in the workflow)
|
|
||||||
# :<tag>
|
|
||||||
# :1.2.3 (for semver tags)
|
|
||||||
# :1.2 (for semver tags)
|
|
||||||
# :<sha>
|
|
||||||
tags: |
|
|
||||||
type=edge,branch=main
|
|
||||||
type=ref,event=branch
|
|
||||||
type=ref,event=tag
|
|
||||||
type=semver,pattern={{version}}
|
|
||||||
type=semver,pattern={{major}}.{{minor}}
|
|
||||||
type=sha
|
|
||||||
# suffix image tags with architecture
|
|
||||||
flavor: |
|
|
||||||
latest=auto
|
|
||||||
suffix=-${{ matrix.arch }},latest=true
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v2
|
|
||||||
|
|
||||||
# do not login to container registry on PRs
|
|
||||||
- if: github.event_name != 'pull_request'
|
|
||||||
name: Docker login
|
|
||||||
uses: docker/login-action@v2
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Build and push cloud image
|
|
||||||
uses: docker/build-push-action@v3
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
file: docker-build/Dockerfile.cloud
|
|
||||||
platforms: Linux/${{ matrix.arch }}
|
|
||||||
# do not push the image on PRs
|
|
||||||
push: false
|
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
|
76
.github/workflows/build-container.yml
vendored
@ -3,8 +3,21 @@ on:
|
|||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- 'main'
|
- 'main'
|
||||||
|
- 'update/ci/docker/*'
|
||||||
|
- 'update/docker/*'
|
||||||
|
- 'dev/ci/docker/*'
|
||||||
|
- 'dev/docker/*'
|
||||||
|
paths:
|
||||||
|
- 'pyproject.toml'
|
||||||
|
- '.dockerignore'
|
||||||
|
- 'invokeai/**'
|
||||||
|
- 'docker/Dockerfile'
|
||||||
tags:
|
tags:
|
||||||
- 'v*.*.*'
|
- 'v*.*.*'
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
docker:
|
docker:
|
||||||
@ -13,19 +26,21 @@ jobs:
|
|||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
flavor:
|
flavor:
|
||||||
- amd
|
- rocm
|
||||||
- cuda
|
- cuda
|
||||||
|
- cpu
|
||||||
include:
|
include:
|
||||||
- flavor: amd
|
- flavor: rocm
|
||||||
pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
|
pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
|
||||||
dockerfile: docker-build/Dockerfile
|
|
||||||
platforms: linux/amd64,linux/arm64
|
|
||||||
- flavor: cuda
|
- flavor: cuda
|
||||||
pip-extra-index-url: ''
|
pip-extra-index-url: ''
|
||||||
dockerfile: docker-build/Dockerfile
|
- flavor: cpu
|
||||||
platforms: linux/amd64,linux/arm64
|
pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
name: ${{ matrix.flavor }}
|
name: ${{ matrix.flavor }}
|
||||||
|
env:
|
||||||
|
PLATFORMS: 'linux/amd64,linux/arm64'
|
||||||
|
DOCKERFILE: 'docker/Dockerfile'
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@ -34,22 +49,28 @@ jobs:
|
|||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@v4
|
uses: docker/metadata-action@v4
|
||||||
with:
|
with:
|
||||||
images: ghcr.io/${{ github.repository }}-${{ matrix.flavor }}
|
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
images: |
|
||||||
|
ghcr.io/${{ github.repository }}
|
||||||
|
${{ vars.DOCKERHUB_REPOSITORY }}
|
||||||
tags: |
|
tags: |
|
||||||
type=ref,event=branch
|
type=ref,event=branch
|
||||||
type=ref,event=tag
|
type=ref,event=tag
|
||||||
type=semver,pattern={{version}}
|
type=pep440,pattern={{version}}
|
||||||
type=semver,pattern={{major}}.{{minor}}
|
type=pep440,pattern={{major}}.{{minor}}
|
||||||
type=semver,pattern={{major}}
|
type=pep440,pattern={{major}}
|
||||||
type=sha
|
type=sha,enable=true,prefix=sha-,format=short
|
||||||
flavor: |
|
flavor: |
|
||||||
latest=true
|
latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
|
||||||
|
suffix=-${{ matrix.flavor }},onlatest=false
|
||||||
|
|
||||||
- name: Set up QEMU
|
- name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v2
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v2
|
||||||
|
with:
|
||||||
|
platforms: ${{ env.PLATFORMS }}
|
||||||
|
|
||||||
- name: Login to GitHub Container Registry
|
- name: Login to GitHub Container Registry
|
||||||
if: github.event_name != 'pull_request'
|
if: github.event_name != 'pull_request'
|
||||||
@ -59,15 +80,34 @@ jobs:
|
|||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.repository_owner }}
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Login to Docker Hub
|
||||||
|
if: github.event_name != 'pull_request' && vars.DOCKERHUB_REPOSITORY != ''
|
||||||
|
uses: docker/login-action@v2
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Build container
|
- name: Build container
|
||||||
uses: docker/build-push-action@v3
|
id: docker_build
|
||||||
|
uses: docker/build-push-action@v4
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
file: ${{ matrix.dockerfile }}
|
file: ${{ env.DOCKERFILE }}
|
||||||
platforms: ${{ matrix.platforms }}
|
platforms: ${{ env.PLATFORMS }}
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
push: ${{ github.ref == 'refs/heads/main' || github.ref_type == 'tag' }}
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
labels: ${{ steps.meta.outputs.labels }}
|
||||||
build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
|
build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
|
||||||
# cache-from: type=gha
|
cache-from: |
|
||||||
# cache-to: type=gha,mode=max
|
type=gha,scope=${{ github.ref_name }}-${{ matrix.flavor }}
|
||||||
|
type=gha,scope=main-${{ matrix.flavor }}
|
||||||
|
cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-${{ matrix.flavor }}
|
||||||
|
|
||||||
|
- name: Docker Hub Description
|
||||||
|
if: github.ref == 'refs/heads/main' || github.ref == 'refs/tags/*' && vars.DOCKERHUB_REPOSITORY != ''
|
||||||
|
uses: peter-evans/dockerhub-description@v3
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
repository: ${{ vars.DOCKERHUB_REPOSITORY }}
|
||||||
|
short-description: ${{ github.event.repository.description }}
|
||||||
|
27
.github/workflows/close-inactive-issues.yml
vendored
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
name: Close inactive issues
|
||||||
|
on:
|
||||||
|
schedule:
|
||||||
|
- cron: "00 6 * * *"
|
||||||
|
|
||||||
|
env:
|
||||||
|
DAYS_BEFORE_ISSUE_STALE: 14
|
||||||
|
DAYS_BEFORE_ISSUE_CLOSE: 28
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
close-issues:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
issues: write
|
||||||
|
pull-requests: write
|
||||||
|
steps:
|
||||||
|
- uses: actions/stale@v5
|
||||||
|
with:
|
||||||
|
days-before-issue-stale: ${{ env.DAYS_BEFORE_ISSUE_STALE }}
|
||||||
|
days-before-issue-close: ${{ env.DAYS_BEFORE_ISSUE_CLOSE }}
|
||||||
|
stale-issue-label: "Inactive Issue"
|
||||||
|
stale-issue-message: "There has been no activity in this issue for ${{ env.DAYS_BEFORE_ISSUE_STALE }} days. If this issue is still being experienced, please reply with an updated confirmation that the issue is still being experienced with the latest release."
|
||||||
|
close-issue-message: "Due to inactivity, this issue was automatically closed. If you are still experiencing the issue, please recreate the issue."
|
||||||
|
days-before-pr-stale: -1
|
||||||
|
days-before-pr-close: -1
|
||||||
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
operations-per-run: 500
|
22
.github/workflows/lint-frontend.yml
vendored
@ -3,14 +3,22 @@ name: Lint frontend
|
|||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
paths:
|
paths:
|
||||||
- 'invokeai/frontend/**'
|
- 'invokeai/frontend/web/**'
|
||||||
|
types:
|
||||||
|
- 'ready_for_review'
|
||||||
|
- 'opened'
|
||||||
|
- 'synchronize'
|
||||||
push:
|
push:
|
||||||
|
branches:
|
||||||
|
- 'main'
|
||||||
paths:
|
paths:
|
||||||
- 'invokeai/frontend/**'
|
- 'invokeai/frontend/web/**'
|
||||||
|
merge_group:
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
defaults:
|
defaults:
|
||||||
run:
|
run:
|
||||||
working-directory: invokeai/frontend
|
working-directory: invokeai/frontend/web
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
lint-frontend:
|
lint-frontend:
|
||||||
@ -23,7 +31,7 @@ jobs:
|
|||||||
node-version: '18'
|
node-version: '18'
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v3
|
||||||
- run: 'yarn install --frozen-lockfile'
|
- run: 'yarn install --frozen-lockfile'
|
||||||
- run: 'yarn tsc'
|
- run: 'yarn run lint:tsc'
|
||||||
- run: 'yarn run madge'
|
- run: 'yarn run lint:madge'
|
||||||
- run: 'yarn run lint --max-warnings=0'
|
- run: 'yarn run lint:eslint'
|
||||||
- run: 'yarn run prettier --check'
|
- run: 'yarn run lint:prettier'
|
||||||
|
3
.github/workflows/mkdocs-material.yml
vendored
@ -5,6 +5,9 @@ on:
|
|||||||
- 'main'
|
- 'main'
|
||||||
- 'development'
|
- 'development'
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
mkdocs-material:
|
mkdocs-material:
|
||||||
if: github.event.pull_request.draft == false
|
if: github.event.pull_request.draft == false
|
||||||
|
41
.github/workflows/pypi-release.yml
vendored
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
name: PyPI Release
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
paths:
|
||||||
|
- 'invokeai/version/invokeai_version.py'
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
release:
|
||||||
|
if: github.repository == 'invoke-ai/InvokeAI'
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
env:
|
||||||
|
TWINE_USERNAME: __token__
|
||||||
|
TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
|
||||||
|
TWINE_NON_INTERACTIVE: 1
|
||||||
|
steps:
|
||||||
|
- name: checkout sources
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: install deps
|
||||||
|
run: pip install --upgrade build twine
|
||||||
|
|
||||||
|
- name: build package
|
||||||
|
run: python3 -m build
|
||||||
|
|
||||||
|
- name: check distribution
|
||||||
|
run: twine check dist/*
|
||||||
|
|
||||||
|
- name: check PyPI versions
|
||||||
|
if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/v2.3'
|
||||||
|
run: |
|
||||||
|
pip install --upgrade requests
|
||||||
|
python -c "\
|
||||||
|
import scripts.pypi_helper; \
|
||||||
|
EXISTS=scripts.pypi_helper.local_on_pypi(); \
|
||||||
|
print(f'PACKAGE_EXISTS={EXISTS}')" >> $GITHUB_ENV
|
||||||
|
|
||||||
|
- name: upload package
|
||||||
|
if: env.PACKAGE_EXISTS == 'False' && env.TWINE_PASSWORD != ''
|
||||||
|
run: twine upload dist/*
|
66
.github/workflows/test-invoke-pip-skip.yml
vendored
Normal file
@ -0,0 +1,66 @@
|
|||||||
|
name: Test invoke.py pip
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
paths:
|
||||||
|
- '**'
|
||||||
|
- '!pyproject.toml'
|
||||||
|
- '!invokeai/**'
|
||||||
|
- 'invokeai/frontend/web/**'
|
||||||
|
merge_group:
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
matrix:
|
||||||
|
if: github.event.pull_request.draft == false
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
python-version:
|
||||||
|
# - '3.9'
|
||||||
|
- '3.10'
|
||||||
|
pytorch:
|
||||||
|
# - linux-cuda-11_6
|
||||||
|
- linux-cuda-11_7
|
||||||
|
- linux-rocm-5_2
|
||||||
|
- linux-cpu
|
||||||
|
- macos-default
|
||||||
|
- windows-cpu
|
||||||
|
# - windows-cuda-11_6
|
||||||
|
# - windows-cuda-11_7
|
||||||
|
include:
|
||||||
|
# - pytorch: linux-cuda-11_6
|
||||||
|
# os: ubuntu-22.04
|
||||||
|
# extra-index-url: 'https://download.pytorch.org/whl/cu116'
|
||||||
|
# github-env: $GITHUB_ENV
|
||||||
|
- pytorch: linux-cuda-11_7
|
||||||
|
os: ubuntu-22.04
|
||||||
|
github-env: $GITHUB_ENV
|
||||||
|
- pytorch: linux-rocm-5_2
|
||||||
|
os: ubuntu-22.04
|
||||||
|
extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
|
||||||
|
github-env: $GITHUB_ENV
|
||||||
|
- pytorch: linux-cpu
|
||||||
|
os: ubuntu-22.04
|
||||||
|
extra-index-url: 'https://download.pytorch.org/whl/cpu'
|
||||||
|
github-env: $GITHUB_ENV
|
||||||
|
- pytorch: macos-default
|
||||||
|
os: macOS-12
|
||||||
|
github-env: $GITHUB_ENV
|
||||||
|
- pytorch: windows-cpu
|
||||||
|
os: windows-2022
|
||||||
|
github-env: $env:GITHUB_ENV
|
||||||
|
# - pytorch: windows-cuda-11_6
|
||||||
|
# os: windows-2022
|
||||||
|
# extra-index-url: 'https://download.pytorch.org/whl/cu116'
|
||||||
|
# github-env: $env:GITHUB_ENV
|
||||||
|
# - pytorch: windows-cuda-11_7
|
||||||
|
# os: windows-2022
|
||||||
|
# extra-index-url: 'https://download.pytorch.org/whl/cu117'
|
||||||
|
# github-env: $env:GITHUB_ENV
|
||||||
|
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
steps:
|
||||||
|
- run: 'echo "No build required"'
|
62
.github/workflows/test-invoke-pip.yml
vendored
@ -3,11 +3,21 @@ on:
|
|||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- 'main'
|
- 'main'
|
||||||
|
paths:
|
||||||
|
- 'pyproject.toml'
|
||||||
|
- 'invokeai/**'
|
||||||
|
- '!invokeai/frontend/web/**'
|
||||||
pull_request:
|
pull_request:
|
||||||
|
paths:
|
||||||
|
- 'pyproject.toml'
|
||||||
|
- 'invokeai/**'
|
||||||
|
- '!invokeai/frontend/web/**'
|
||||||
types:
|
types:
|
||||||
- 'ready_for_review'
|
- 'ready_for_review'
|
||||||
- 'opened'
|
- 'opened'
|
||||||
- 'synchronize'
|
- 'synchronize'
|
||||||
|
merge_group:
|
||||||
|
workflow_dispatch:
|
||||||
|
|
||||||
concurrency:
|
concurrency:
|
||||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||||
@ -62,28 +72,13 @@ jobs:
|
|||||||
# github-env: $env:GITHUB_ENV
|
# github-env: $env:GITHUB_ENV
|
||||||
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
|
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
|
env:
|
||||||
|
PIP_USE_PEP517: '1'
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout sources
|
- name: Checkout sources
|
||||||
id: checkout-sources
|
id: checkout-sources
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
- name: setup python
|
|
||||||
uses: actions/setup-python@v4
|
|
||||||
with:
|
|
||||||
python-version: ${{ matrix.python-version }}
|
|
||||||
|
|
||||||
- name: Set Cache-Directory Windows
|
|
||||||
if: runner.os == 'Windows'
|
|
||||||
id: set-cache-dir-windows
|
|
||||||
run: |
|
|
||||||
echo "CACHE_DIR=$HOME\invokeai\models" >> ${{ matrix.github-env }}
|
|
||||||
echo "PIP_NO_CACHE_DIR=1" >> ${{ matrix.github-env }}
|
|
||||||
|
|
||||||
- name: Set Cache-Directory others
|
|
||||||
if: runner.os != 'Windows'
|
|
||||||
id: set-cache-dir-others
|
|
||||||
run: echo "CACHE_DIR=$HOME/invokeai/models" >> ${{ matrix.github-env }}
|
|
||||||
|
|
||||||
- name: set test prompt to main branch validation
|
- name: set test prompt to main branch validation
|
||||||
if: ${{ github.ref == 'refs/heads/main' }}
|
if: ${{ github.ref == 'refs/heads/main' }}
|
||||||
run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
|
run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
|
||||||
@ -92,26 +87,29 @@ jobs:
|
|||||||
if: ${{ github.ref != 'refs/heads/main' }}
|
if: ${{ github.ref != 'refs/heads/main' }}
|
||||||
run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
|
run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
|
||||||
|
|
||||||
|
- name: setup python
|
||||||
|
uses: actions/setup-python@v4
|
||||||
|
with:
|
||||||
|
python-version: ${{ matrix.python-version }}
|
||||||
|
cache: pip
|
||||||
|
cache-dependency-path: pyproject.toml
|
||||||
|
|
||||||
- name: install invokeai
|
- name: install invokeai
|
||||||
env:
|
env:
|
||||||
PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}
|
PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}
|
||||||
run: >
|
run: >
|
||||||
pip3 install
|
pip3 install
|
||||||
--use-pep517
|
|
||||||
--editable=".[test]"
|
--editable=".[test]"
|
||||||
|
|
||||||
- name: run pytest
|
- name: run pytest
|
||||||
|
id: run-pytest
|
||||||
run: pytest
|
run: pytest
|
||||||
|
|
||||||
- name: Use Cached models
|
- name: set INVOKEAI_OUTDIR
|
||||||
id: cache-sd-model
|
run: >
|
||||||
uses: actions/cache@v3
|
python -c
|
||||||
env:
|
"import os;from invokeai.backend.globals import Globals;OUTDIR=os.path.join(Globals.root,str('outputs'));print(f'INVOKEAI_OUTDIR={OUTDIR}')"
|
||||||
cache-name: huggingface-models
|
>> ${{ matrix.github-env }}
|
||||||
with:
|
|
||||||
path: ${{ env.CACHE_DIR }}
|
|
||||||
key: ${{ env.cache-name }}
|
|
||||||
enableCrossOsArchive: true
|
|
||||||
|
|
||||||
- name: run invokeai-configure
|
- name: run invokeai-configure
|
||||||
id: run-preload-models
|
id: run-preload-models
|
||||||
@ -124,9 +122,8 @@ jobs:
|
|||||||
--full-precision
|
--full-precision
|
||||||
# can't use fp16 weights without a GPU
|
# can't use fp16 weights without a GPU
|
||||||
|
|
||||||
- name: Run the tests
|
- name: run invokeai
|
||||||
if: runner.os != 'Windows'
|
id: run-invokeai
|
||||||
id: run-tests
|
|
||||||
env:
|
env:
|
||||||
# Set offline mode to make sure configure preloaded successfully.
|
# Set offline mode to make sure configure preloaded successfully.
|
||||||
HF_HUB_OFFLINE: 1
|
HF_HUB_OFFLINE: 1
|
||||||
@ -137,10 +134,11 @@ jobs:
|
|||||||
--no-patchmatch
|
--no-patchmatch
|
||||||
--no-nsfw_checker
|
--no-nsfw_checker
|
||||||
--from_file ${{ env.TEST_PROMPTS }}
|
--from_file ${{ env.TEST_PROMPTS }}
|
||||||
|
--outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}
|
||||||
|
|
||||||
- name: Archive results
|
- name: Archive results
|
||||||
id: archive-results
|
id: archive-results
|
||||||
uses: actions/upload-artifact@v3
|
uses: actions/upload-artifact@v3
|
||||||
with:
|
with:
|
||||||
name: results_${{ matrix.pytorch }}_${{ matrix.python-version }}
|
name: results
|
||||||
path: ${{ env.INVOKEAI_ROOT }}/outputs
|
path: ${{ env.INVOKEAI_OUTDIR }}
|
||||||
|
14
.gitignore
vendored
@ -1,4 +1,5 @@
|
|||||||
# ignore default image save location and model symbolic link
|
# ignore default image save location and model symbolic link
|
||||||
|
.idea/
|
||||||
embeddings/
|
embeddings/
|
||||||
outputs/
|
outputs/
|
||||||
models/ldm/stable-diffusion-v1/model.ckpt
|
models/ldm/stable-diffusion-v1/model.ckpt
|
||||||
@ -62,15 +63,18 @@ pip-delete-this-directory.txt
|
|||||||
htmlcov/
|
htmlcov/
|
||||||
.tox/
|
.tox/
|
||||||
.nox/
|
.nox/
|
||||||
|
.coveragerc
|
||||||
.coverage
|
.coverage
|
||||||
.coverage.*
|
.coverage.*
|
||||||
.cache
|
.cache
|
||||||
nosetests.xml
|
nosetests.xml
|
||||||
coverage.xml
|
coverage.xml
|
||||||
|
cov.xml
|
||||||
*.cover
|
*.cover
|
||||||
*.py,cover
|
*.py,cover
|
||||||
.hypothesis/
|
.hypothesis/
|
||||||
.pytest_cache/
|
.pytest_cache/
|
||||||
|
.pytest.ini
|
||||||
cover/
|
cover/
|
||||||
junit/
|
junit/
|
||||||
|
|
||||||
@ -196,7 +200,7 @@ checkpoints
|
|||||||
.DS_Store
|
.DS_Store
|
||||||
|
|
||||||
# Let the frontend manage its own gitignore
|
# Let the frontend manage its own gitignore
|
||||||
!invokeai/frontend/*
|
!invokeai/frontend/web/*
|
||||||
|
|
||||||
# Scratch folder
|
# Scratch folder
|
||||||
.scratch/
|
.scratch/
|
||||||
@ -211,11 +215,6 @@ gfpgan/
|
|||||||
# config file (will be created by installer)
|
# config file (will be created by installer)
|
||||||
configs/models.yaml
|
configs/models.yaml
|
||||||
|
|
||||||
# weights (will be created by installer)
|
|
||||||
models/ldm/stable-diffusion-v1/*.ckpt
|
|
||||||
models/clipseg
|
|
||||||
models/gfpgan
|
|
||||||
|
|
||||||
# ignore initfile
|
# ignore initfile
|
||||||
.invokeai
|
.invokeai
|
||||||
|
|
||||||
@ -230,6 +229,3 @@ installer/install.bat
|
|||||||
installer/install.sh
|
installer/install.sh
|
||||||
installer/update.bat
|
installer/update.bat
|
||||||
installer/update.sh
|
installer/update.sh
|
||||||
|
|
||||||
# no longer stored in source directory
|
|
||||||
models
|
|
159
README.md
@ -1,6 +1,6 @@
|
|||||||
<div align="center">
|
<div align="center">
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
# InvokeAI: A Stable Diffusion Toolkit
|
# InvokeAI: A Stable Diffusion Toolkit
|
||||||
|
|
||||||
@ -10,10 +10,10 @@
|
|||||||
|
|
||||||
[![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link]
|
[![CI checks on main badge]][CI checks on main link] [![latest commit to main badge]][latest commit to main link]
|
||||||
|
|
||||||
[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link]
|
[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link] [![translation status badge]][translation status link]
|
||||||
|
|
||||||
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
|
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
|
||||||
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
|
[CI checks on main link]:https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Amain
|
||||||
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
|
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
|
||||||
[discord link]: https://discord.gg/ZmtBAhwWhy
|
[discord link]: https://discord.gg/ZmtBAhwWhy
|
||||||
[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
|
[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
|
||||||
@ -28,12 +28,14 @@
|
|||||||
[latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main
|
[latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main
|
||||||
[latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
|
[latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
|
||||||
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
|
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
|
||||||
|
[translation status badge]: https://hosted.weblate.org/widgets/invokeai/-/svg-badge.svg
|
||||||
|
[translation status link]: https://hosted.weblate.org/engage/invokeai/
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
InvokeAI is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. InvokeAI offers an industry leading Web Interface, interactive Command Line Interface, and also serves as the foundation for multiple commercial products.
|
InvokeAI is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. InvokeAI offers an industry leading Web Interface, interactive Command Line Interface, and also serves as the foundation for multiple commercial products.
|
||||||
|
|
||||||
**Quick links**: [[How to Install](#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
|
**Quick links**: [[How to Install](https://invoke-ai.github.io/InvokeAI/#installation)] [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
|
||||||
|
|
||||||
_Note: InvokeAI is rapidly evolving. Please use the
|
_Note: InvokeAI is rapidly evolving. Please use the
|
||||||
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
|
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
|
||||||
@ -41,38 +43,136 @@ requests. Be sure to use the provided templates. They will help us diagnose issu
|
|||||||
|
|
||||||
<div align="center">
|
<div align="center">
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
# Getting Started with InvokeAI
|
## Table of Contents
|
||||||
|
|
||||||
|
1. [Quick Start](#getting-started-with-invokeai)
|
||||||
|
2. [Installation](#detailed-installation-instructions)
|
||||||
|
3. [Hardware Requirements](#hardware-requirements)
|
||||||
|
4. [Features](#features)
|
||||||
|
5. [Latest Changes](#latest-changes)
|
||||||
|
6. [Troubleshooting](#troubleshooting)
|
||||||
|
7. [Contributing](#contributing)
|
||||||
|
8. [Contributors](#contributors)
|
||||||
|
9. [Support](#support)
|
||||||
|
10. [Further Reading](#further-reading)
|
||||||
|
|
||||||
|
## Getting Started with InvokeAI
|
||||||
|
|
||||||
For full installation and upgrade instructions, please see:
|
For full installation and upgrade instructions, please see:
|
||||||
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)
|
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)
|
||||||
|
|
||||||
|
### Automatic Installer (suggested for 1st time users)
|
||||||
|
|
||||||
1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)
|
1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)
|
||||||
|
|
||||||
2. Download the .zip file for your OS (Windows/macOS/Linux).
|
2. Download the .zip file for your OS (Windows/macOS/Linux).
|
||||||
|
|
||||||
3. Unzip the file.
|
3. Unzip the file.
|
||||||
4. If you are on Windows, double-click on the `install.bat` script. On macOS, open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press return. On Linux, run `install.sh`.
|
|
||||||
5. Wait a while, until it is done.
|
|
||||||
6. The folder where you ran the installer from will now be filled with lots of files. If you are on Windows, double-click on the `invoke.bat` file. On macOS, open a Terminal window, drag `invoke.sh` from the folder into the Terminal, and press return. On Linux, run `invoke.sh`
|
|
||||||
7. Press 2 to open the "browser-based UI", press enter/return, wait a minute or two for Stable Diffusion to start up, then open your browser and go to http://localhost:9090.
|
|
||||||
8. Type `banana sushi` in the box on the top left and click `Invoke`
|
|
||||||
|
|
||||||
|
4. If you are on Windows, double-click on the `install.bat` script. On
|
||||||
|
macOS, open a Terminal window, drag the file `install.sh` from Finder
|
||||||
|
into the Terminal, and press return. On Linux, run `install.sh`.
|
||||||
|
|
||||||
## Table of Contents
|
5. You'll be asked to confirm the location of the folder in which
|
||||||
|
to install InvokeAI and its image generation model files. Pick a
|
||||||
|
location with at least 15 GB of free memory. More if you plan on
|
||||||
|
installing lots of models.
|
||||||
|
|
||||||
1. [Installation](#installation)
|
6. Wait while the installer does its thing. After installing the software,
|
||||||
2. [Hardware Requirements](#hardware-requirements)
|
the installer will launch a script that lets you configure InvokeAI and
|
||||||
3. [Features](#features)
|
select a set of starting image generaiton models.
|
||||||
4. [Latest Changes](#latest-changes)
|
|
||||||
5. [Troubleshooting](#troubleshooting)
|
|
||||||
6. [Contributing](#contributing)
|
|
||||||
7. [Contributors](#contributors)
|
|
||||||
8. [Support](#support)
|
|
||||||
9. [Further Reading](#further-reading)
|
|
||||||
|
|
||||||
## Installation
|
7. Find the folder that InvokeAI was installed into (it is not the
|
||||||
|
same as the unpacked zip file directory!) The default location of this
|
||||||
|
folder (if you didn't change it in step 5) is `~/invokeai` on
|
||||||
|
Linux/Mac systems, and `C:\Users\YourName\invokeai` on Windows. This directory will contain launcher scripts named `invoke.sh` and `invoke.bat`.
|
||||||
|
|
||||||
|
8. On Windows systems, double-click on the `invoke.bat` file. On
|
||||||
|
macOS, open a Terminal window, drag `invoke.sh` from the folder into
|
||||||
|
the Terminal, and press return. On Linux, run `invoke.sh`
|
||||||
|
|
||||||
|
9. Press 2 to open the "browser-based UI", press enter/return, wait a
|
||||||
|
minute or two for Stable Diffusion to start up, then open your browser
|
||||||
|
and go to http://localhost:9090.
|
||||||
|
|
||||||
|
10. Type `banana sushi` in the box on the top left and click `Invoke`
|
||||||
|
|
||||||
|
### Command-Line Installation (for users familiar with Terminals)
|
||||||
|
|
||||||
|
You must have Python 3.9 or 3.10 installed on your machine. Earlier or later versions are
|
||||||
|
not supported.
|
||||||
|
|
||||||
|
1. Open a command-line window on your machine. The PowerShell is recommended for Windows.
|
||||||
|
2. Create a directory to install InvokeAI into. You'll need at least 15 GB of free space:
|
||||||
|
|
||||||
|
```terminal
|
||||||
|
mkdir invokeai
|
||||||
|
````
|
||||||
|
|
||||||
|
3. Create a virtual environment named `.venv` inside this directory and activate it:
|
||||||
|
|
||||||
|
```terminal
|
||||||
|
cd invokeai
|
||||||
|
python -m venv .venv --prompt InvokeAI
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Activate the virtual environment (do it every time you run InvokeAI)
|
||||||
|
|
||||||
|
_For Linux/Mac users:_
|
||||||
|
|
||||||
|
```sh
|
||||||
|
source .venv/bin/activate
|
||||||
|
```
|
||||||
|
|
||||||
|
_For Windows users:_
|
||||||
|
|
||||||
|
```ps
|
||||||
|
.venv\Scripts\activate
|
||||||
|
```
|
||||||
|
|
||||||
|
5. Install the InvokeAI module and its dependencies. Choose the command suited for your platform & GPU.
|
||||||
|
|
||||||
|
_For Windows/Linux with an NVIDIA GPU:_
|
||||||
|
|
||||||
|
```terminal
|
||||||
|
pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
|
||||||
|
```
|
||||||
|
|
||||||
|
_For Linux with an AMD GPU:_
|
||||||
|
|
||||||
|
```sh
|
||||||
|
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
||||||
|
```
|
||||||
|
|
||||||
|
_For Macintoshes, either Intel or M1/M2:_
|
||||||
|
|
||||||
|
```sh
|
||||||
|
pip install InvokeAI --use-pep517
|
||||||
|
```
|
||||||
|
|
||||||
|
6. Configure InvokeAI and install a starting set of image generation models (you only need to do this once):
|
||||||
|
|
||||||
|
```terminal
|
||||||
|
invokeai-configure
|
||||||
|
```
|
||||||
|
|
||||||
|
7. Launch the web server (do it every time you run InvokeAI):
|
||||||
|
|
||||||
|
```terminal
|
||||||
|
invokeai --web
|
||||||
|
```
|
||||||
|
|
||||||
|
8. Point your browser to http://localhost:9090 to bring up the web interface.
|
||||||
|
9. Type `banana sushi` in the box on the top left and click `Invoke`.
|
||||||
|
|
||||||
|
Be sure to activate the virtual environment each time before re-launching InvokeAI,
|
||||||
|
using `source .venv/bin/activate` or `.venv\Scripts\activate`.
|
||||||
|
|
||||||
|
### Detailed Installation Instructions
|
||||||
|
|
||||||
This fork is supported across Linux, Windows and Macintosh. Linux
|
This fork is supported across Linux, Windows and Macintosh. Linux
|
||||||
users can use either an Nvidia-based card (with CUDA support) or an
|
users can use either an Nvidia-based card (with CUDA support) or an
|
||||||
@ -80,28 +180,29 @@ AMD card (using the ROCm driver). For full installation and upgrade
|
|||||||
instructions, please see:
|
instructions, please see:
|
||||||
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)
|
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/INSTALL_SOURCE/)
|
||||||
|
|
||||||
### Hardware Requirements
|
## Hardware Requirements
|
||||||
|
|
||||||
InvokeAI is supported across Linux, Windows and macOS. Linux
|
InvokeAI is supported across Linux, Windows and macOS. Linux
|
||||||
users can use either an Nvidia-based card (with CUDA support) or an
|
users can use either an Nvidia-based card (with CUDA support) or an
|
||||||
AMD card (using the ROCm driver).
|
AMD card (using the ROCm driver).
|
||||||
|
|
||||||
#### System
|
### System
|
||||||
|
|
||||||
You will need one of the following:
|
You will need one of the following:
|
||||||
|
|
||||||
- An NVIDIA-based graphics card with 4 GB or more VRAM memory.
|
- An NVIDIA-based graphics card with 4 GB or more VRAM memory.
|
||||||
- An Apple computer with an M1 chip.
|
- An Apple computer with an M1 chip.
|
||||||
|
- An AMD-based graphics card with 4GB or more VRAM memory. (Linux only)
|
||||||
|
|
||||||
We do not recommend the GTX 1650 or 1660 series video cards. They are
|
We do not recommend the GTX 1650 or 1660 series video cards. They are
|
||||||
unable to run in half-precision mode and do not have sufficient VRAM
|
unable to run in half-precision mode and do not have sufficient VRAM
|
||||||
to render 512x512 images.
|
to render 512x512 images.
|
||||||
|
|
||||||
#### Memory
|
### Memory
|
||||||
|
|
||||||
- At least 12 GB Main Memory RAM.
|
- At least 12 GB Main Memory RAM.
|
||||||
|
|
||||||
#### Disk
|
### Disk
|
||||||
|
|
||||||
- At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.
|
- At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.
|
||||||
|
|
||||||
@ -151,13 +252,15 @@ Notes](https://github.com/invoke-ai/InvokeAI/releases) and the
|
|||||||
Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
|
Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
|
||||||
problems and other issues.
|
problems and other issues.
|
||||||
|
|
||||||
# Contributing
|
## Contributing
|
||||||
|
|
||||||
Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
|
Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
|
||||||
cleanup, testing, or code reviews, is very much encouraged to do so.
|
cleanup, testing, or code reviews, is very much encouraged to do so.
|
||||||
|
|
||||||
To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.
|
To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.
|
||||||
|
|
||||||
|
If you'd like to help with translation, please see our [translation guide](docs/other/TRANSLATION.md).
|
||||||
|
|
||||||
If you are unfamiliar with how
|
If you are unfamiliar with how
|
||||||
to contribute to GitHub projects, here is a
|
to contribute to GitHub projects, here is a
|
||||||
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**.
|
[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**.
|
||||||
@ -174,6 +277,8 @@ This fork is a combined effort of various people from across the world.
|
|||||||
[Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for
|
[Check out the list of all these amazing people](https://invoke-ai.github.io/InvokeAI/other/CONTRIBUTORS/). We thank them for
|
||||||
their time, hard work and effort.
|
their time, hard work and effort.
|
||||||
|
|
||||||
|
Thanks to [Weblate](https://weblate.org/) for generously providing translation services to this project.
|
||||||
|
|
||||||
### Support
|
### Support
|
||||||
|
|
||||||
For support, please use this repository's GitHub Issues tracking service, or join the Discord.
|
For support, please use this repository's GitHub Issues tracking service, or join the Discord.
|
||||||
|
@ -147,7 +147,7 @@ echo ***** Installed invoke launcher script ******
|
|||||||
rd /s /q binary_installer installer_files
|
rd /s /q binary_installer installer_files
|
||||||
|
|
||||||
@rem preload the models
|
@rem preload the models
|
||||||
call .venv\Scripts\python scripts\configure_invokeai.py
|
call .venv\Scripts\python ldm\invoke\config\invokeai_configure.py
|
||||||
set err_msg=----- model download clone failed -----
|
set err_msg=----- model download clone failed -----
|
||||||
if %errorlevel% neq 0 goto err_exit
|
if %errorlevel% neq 0 goto err_exit
|
||||||
deactivate
|
deactivate
|
||||||
|
4
coverage/.gitignore
vendored
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
# Ignore everything in this directory
|
||||||
|
*
|
||||||
|
# Except this file
|
||||||
|
!.gitignore
|
@ -1,78 +0,0 @@
|
|||||||
# syntax=docker/dockerfile:1
|
|
||||||
FROM python:3.9-slim AS python-base
|
|
||||||
|
|
||||||
# use bash
|
|
||||||
SHELL [ "/bin/bash", "-c" ]
|
|
||||||
|
|
||||||
# Install necesarry packages
|
|
||||||
RUN \
|
|
||||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
|
||||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
|
||||||
apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
libgl1-mesa-glx=20.3.* \
|
|
||||||
libglib2.0-0=2.66.* \
|
|
||||||
libopencv-dev=4.5.* \
|
|
||||||
&& apt-get clean \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
ARG APPDIR=/usr/src/app
|
|
||||||
ENV APPDIR ${APPDIR}
|
|
||||||
WORKDIR ${APPDIR}
|
|
||||||
|
|
||||||
FROM python-base AS builder
|
|
||||||
|
|
||||||
RUN \
|
|
||||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
|
||||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
|
||||||
apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
gcc=4:10.2.* \
|
|
||||||
python3-dev=3.9.* \
|
|
||||||
&& apt-get clean \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
# copy sources
|
|
||||||
COPY --link . .
|
|
||||||
ARG PIP_EXTRA_INDEX_URL
|
|
||||||
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
|
|
||||||
|
|
||||||
# install requirements
|
|
||||||
RUN python3 -m venv invokeai \
|
|
||||||
&& ${APPDIR}/invokeai/bin/pip \
|
|
||||||
install \
|
|
||||||
--no-cache-dir \
|
|
||||||
--use-pep517 \
|
|
||||||
.
|
|
||||||
|
|
||||||
FROM python-base AS runtime
|
|
||||||
|
|
||||||
# setup environment
|
|
||||||
COPY --link . .
|
|
||||||
COPY --from=builder ${APPDIR}/invokeai ${APPDIR}/invokeai
|
|
||||||
ENV PATH=${APPDIR}/invokeai/bin:$PATH
|
|
||||||
ENV INVOKEAI_ROOT=/data
|
|
||||||
ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
|
|
||||||
|
|
||||||
# build patchmatch
|
|
||||||
RUN \
|
|
||||||
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
|
||||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
|
||||||
apt-get update \
|
|
||||||
&& apt-get install -y \
|
|
||||||
--no-install-recommends \
|
|
||||||
build-essential=12.9 \
|
|
||||||
&& PYTHONDONTWRITEBYTECODE=1 \
|
|
||||||
python3 -c "from patchmatch import patch_match" \
|
|
||||||
&& apt-get remove -y \
|
|
||||||
--autoremove \
|
|
||||||
build-essential \
|
|
||||||
&& apt-get autoclean \
|
|
||||||
&& rm -rf /var/lib/apt/lists/*
|
|
||||||
|
|
||||||
# set Entrypoint and default CMD
|
|
||||||
ENTRYPOINT [ "invoke" ]
|
|
||||||
CMD [ "--web", "--host=0.0.0.0" ]
|
|
||||||
VOLUME [ "/data" ]
|
|
@ -1,86 +0,0 @@
|
|||||||
#######################
|
|
||||||
#### Builder stage ####
|
|
||||||
|
|
||||||
FROM library/ubuntu:22.04 AS builder
|
|
||||||
|
|
||||||
ARG DEBIAN_FRONTEND=noninteractive
|
|
||||||
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
|
|
||||||
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
|
||||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
|
||||||
apt update && apt-get install -y \
|
|
||||||
git \
|
|
||||||
libglib2.0-0 \
|
|
||||||
libgl1-mesa-glx \
|
|
||||||
python3-venv \
|
|
||||||
python3-pip \
|
|
||||||
build-essential \
|
|
||||||
python3-opencv \
|
|
||||||
libopencv-dev
|
|
||||||
|
|
||||||
# This is needed for patchmatch support
|
|
||||||
RUN cd /usr/lib/x86_64-linux-gnu/pkgconfig/ &&\
|
|
||||||
ln -sf opencv4.pc opencv.pc
|
|
||||||
|
|
||||||
ARG WORKDIR=/invokeai
|
|
||||||
WORKDIR ${WORKDIR}
|
|
||||||
|
|
||||||
ENV VIRTUAL_ENV=${WORKDIR}/.venv
|
|
||||||
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
|
|
||||||
|
|
||||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
|
||||||
python3 -m venv ${VIRTUAL_ENV} &&\
|
|
||||||
pip install --extra-index-url https://download.pytorch.org/whl/cu116 \
|
|
||||||
torch==1.12.0+cu116 \
|
|
||||||
torchvision==0.13.0+cu116 &&\
|
|
||||||
pip install -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch
|
|
||||||
|
|
||||||
COPY . .
|
|
||||||
RUN --mount=type=cache,target=/root/.cache/pip \
|
|
||||||
cp environments-and-requirements/requirements-lin-cuda.txt requirements.txt && \
|
|
||||||
pip install -r requirements.txt &&\
|
|
||||||
pip install -e .
|
|
||||||
|
|
||||||
|
|
||||||
#######################
|
|
||||||
#### Runtime stage ####
|
|
||||||
|
|
||||||
FROM library/ubuntu:22.04 as runtime
|
|
||||||
|
|
||||||
ARG DEBIAN_FRONTEND=noninteractive
|
|
||||||
ENV PYTHONUNBUFFERED=1
|
|
||||||
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
|
|
||||||
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
|
||||||
apt update && apt install -y --no-install-recommends \
|
|
||||||
git \
|
|
||||||
curl \
|
|
||||||
ncdu \
|
|
||||||
iotop \
|
|
||||||
bzip2 \
|
|
||||||
libglib2.0-0 \
|
|
||||||
libgl1-mesa-glx \
|
|
||||||
python3-venv \
|
|
||||||
python3-pip \
|
|
||||||
build-essential \
|
|
||||||
python3-opencv \
|
|
||||||
libopencv-dev &&\
|
|
||||||
apt-get clean && apt-get autoclean
|
|
||||||
|
|
||||||
ARG WORKDIR=/invokeai
|
|
||||||
WORKDIR ${WORKDIR}
|
|
||||||
|
|
||||||
ENV INVOKEAI_ROOT=/mnt/invokeai
|
|
||||||
ENV VIRTUAL_ENV=${WORKDIR}/.venv
|
|
||||||
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
|
|
||||||
|
|
||||||
COPY --from=builder ${WORKDIR} ${WORKDIR}
|
|
||||||
COPY --from=builder /usr/lib/x86_64-linux-gnu/pkgconfig /usr/lib/x86_64-linux-gnu/pkgconfig
|
|
||||||
|
|
||||||
# build patchmatch
|
|
||||||
RUN python -c "from patchmatch import patch_match"
|
|
||||||
|
|
||||||
## workaround for non-existent initfile when runtime directory is mounted; see #1613
|
|
||||||
RUN touch /root/.invokeai
|
|
||||||
|
|
||||||
ENTRYPOINT ["bash"]
|
|
||||||
|
|
||||||
CMD ["-c", "python3 scripts/invoke.py --web --host 0.0.0.0"]
|
|
@ -1,44 +0,0 @@
|
|||||||
# Directory in the container where the INVOKEAI_ROOT (runtime dir) will be mounted
|
|
||||||
INVOKEAI_ROOT=/mnt/invokeai
|
|
||||||
# Host directory to contain the runtime dir. Will be mounted at INVOKEAI_ROOT path in the container
|
|
||||||
HOST_MOUNT_PATH=${HOME}/invokeai
|
|
||||||
|
|
||||||
IMAGE=local/invokeai:latest
|
|
||||||
|
|
||||||
USER=$(shell id -u)
|
|
||||||
GROUP=$(shell id -g)
|
|
||||||
|
|
||||||
# All downloaded models, config, etc will end up in ${HOST_MOUNT_PATH} on the host.
|
|
||||||
# This is consistent with the expected non-Docker behaviour.
|
|
||||||
# Contents can be moved to a persistent storage and used to prime the cache on another host.
|
|
||||||
|
|
||||||
build:
|
|
||||||
DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..
|
|
||||||
|
|
||||||
configure:
|
|
||||||
docker run --rm -it --runtime=nvidia --gpus=all \
|
|
||||||
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
|
|
||||||
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
|
|
||||||
${IMAGE} -c "python scripts/configure_invokeai.py"
|
|
||||||
|
|
||||||
# Run the container with the runtime dir mounted and the web server exposed on port 9090
|
|
||||||
web:
|
|
||||||
docker run --rm -it --runtime=nvidia --gpus=all \
|
|
||||||
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
|
|
||||||
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
|
|
||||||
-p 9090:9090 \
|
|
||||||
${IMAGE} -c "python scripts/invoke.py --web --host 0.0.0.0"
|
|
||||||
|
|
||||||
# Run the cli with the runtime dir mounted
|
|
||||||
cli:
|
|
||||||
docker run --rm -it --runtime=nvidia --gpus=all \
|
|
||||||
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
|
|
||||||
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
|
|
||||||
${IMAGE} -c "python scripts/invoke.py"
|
|
||||||
|
|
||||||
# Run the container with the runtime dir mounted and open a bash shell
|
|
||||||
shell:
|
|
||||||
docker run --rm -it --runtime=nvidia --gpus=all \
|
|
||||||
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} ${IMAGE} --
|
|
||||||
|
|
||||||
.PHONY: build configure web cli shell
|
|
@ -1,42 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
set -e
|
|
||||||
|
|
||||||
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
|
|
||||||
#
|
|
||||||
# Some possible pip extra-index urls (cuda 11.7 is available without extra url):
|
|
||||||
#
|
|
||||||
# CUDA 11.6: https://download.pytorch.org/whl/cu116
|
|
||||||
# ROCm 5.2: https://download.pytorch.org/whl/rocm5.2
|
|
||||||
# CPU: https://download.pytorch.org/whl/cpu
|
|
||||||
#
|
|
||||||
# as found on https://pytorch.org/get-started/locally/
|
|
||||||
|
|
||||||
cd "$(dirname "$0")" || exit 1
|
|
||||||
|
|
||||||
source ./env.sh
|
|
||||||
|
|
||||||
DOCKERFILE=${INVOKE_DOCKERFILE:-"./Dockerfile"}
|
|
||||||
|
|
||||||
# print the settings
|
|
||||||
echo -e "You are using these values:\n"
|
|
||||||
echo -e "Dockerfile:\t ${DOCKERFILE}"
|
|
||||||
echo -e "extra-index-url: ${PIP_EXTRA_INDEX_URL:-none}"
|
|
||||||
echo -e "Volumename:\t ${VOLUMENAME}"
|
|
||||||
echo -e "arch:\t\t ${ARCH}"
|
|
||||||
echo -e "Platform:\t ${PLATFORM}"
|
|
||||||
echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"
|
|
||||||
|
|
||||||
if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
|
|
||||||
echo -e "Volume already exists\n"
|
|
||||||
else
|
|
||||||
echo -n "createing docker volume "
|
|
||||||
docker volume create "${VOLUMENAME}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Build Container
|
|
||||||
docker build \
|
|
||||||
--platform="${PLATFORM}" \
|
|
||||||
--tag="${INVOKEAI_TAG}" \
|
|
||||||
${PIP_EXTRA_INDEX_URL:+--build-arg=PIP_EXTRA_INDEX_URL="${PIP_EXTRA_INDEX_URL}"} \
|
|
||||||
--file="${DOCKERFILE}" \
|
|
||||||
..
|
|
@ -1,10 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# Variables shared by build.sh and run.sh
|
|
||||||
REPOSITORY_NAME=${REPOSITORY_NAME:-$(basename "$(git rev-parse --show-toplevel)")}
|
|
||||||
VOLUMENAME=${VOLUMENAME:-${REPOSITORY_NAME,,}_data}
|
|
||||||
ARCH=${ARCH:-$(uname -m)}
|
|
||||||
PLATFORM=${PLATFORM:-Linux/${ARCH}}
|
|
||||||
CONTAINER_FLAVOR=${CONTAINER_FLAVOR:-cuda}
|
|
||||||
INVOKEAI_BRANCH=$(git branch --show)
|
|
||||||
INVOKEAI_TAG=${REPOSITORY_NAME,,}-${CONTAINER_FLAVOR}:${INVOKEAI_TAG:-${INVOKEAI_BRANCH##*/}}
|
|
@ -1,30 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
set -e
|
|
||||||
|
|
||||||
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
|
|
||||||
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
|
|
||||||
|
|
||||||
cd "$(dirname "$0")" || exit 1
|
|
||||||
|
|
||||||
source ./env.sh
|
|
||||||
|
|
||||||
echo -e "You are using these values:\n"
|
|
||||||
echo -e "Volumename:\t${VOLUMENAME}"
|
|
||||||
echo -e "Invokeai_tag:\t${INVOKEAI_TAG}"
|
|
||||||
echo -e "local Models:\t${MODELSPATH:-unset}\n"
|
|
||||||
|
|
||||||
docker run \
|
|
||||||
--interactive \
|
|
||||||
--tty \
|
|
||||||
--rm \
|
|
||||||
--platform="$PLATFORM" \
|
|
||||||
--name="${REPOSITORY_NAME,,}" \
|
|
||||||
--hostname="${REPOSITORY_NAME,,}" \
|
|
||||||
--mount=source="$VOLUMENAME",target=/data \
|
|
||||||
${MODELSPATH:+-u "$(id -u):$(id -g)"} \
|
|
||||||
${MODELSPATH:+--mount=type=bind,source=${MODELSPATH},target=/data/models} \
|
|
||||||
${HUGGING_FACE_HUB_TOKEN:+--env=HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}} \
|
|
||||||
--publish=9090:9090 \
|
|
||||||
--cap-add=sys_nice \
|
|
||||||
${GPU_FLAGS:+--gpus=${GPU_FLAGS}} \
|
|
||||||
"$INVOKEAI_TAG" ${1:+$@}
|
|
107
docker/Dockerfile
Normal file
@ -0,0 +1,107 @@
|
|||||||
|
# syntax=docker/dockerfile:1
|
||||||
|
|
||||||
|
ARG PYTHON_VERSION=3.9
|
||||||
|
##################
|
||||||
|
## base image ##
|
||||||
|
##################
|
||||||
|
FROM --platform=${TARGETPLATFORM} python:${PYTHON_VERSION}-slim AS python-base
|
||||||
|
|
||||||
|
LABEL org.opencontainers.image.authors="mauwii@outlook.de"
|
||||||
|
|
||||||
|
# Prepare apt for buildkit cache
|
||||||
|
RUN rm -f /etc/apt/apt.conf.d/docker-clean \
|
||||||
|
&& echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' >/etc/apt/apt.conf.d/keep-cache
|
||||||
|
|
||||||
|
# Install dependencies
|
||||||
|
RUN \
|
||||||
|
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||||
|
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||||
|
apt-get update \
|
||||||
|
&& apt-get install -y \
|
||||||
|
--no-install-recommends \
|
||||||
|
libgl1-mesa-glx=20.3.* \
|
||||||
|
libglib2.0-0=2.66.* \
|
||||||
|
libopencv-dev=4.5.*
|
||||||
|
|
||||||
|
# Set working directory and env
|
||||||
|
ARG APPDIR=/usr/src
|
||||||
|
ARG APPNAME=InvokeAI
|
||||||
|
WORKDIR ${APPDIR}
|
||||||
|
ENV PATH ${APPDIR}/${APPNAME}/bin:$PATH
|
||||||
|
# Keeps Python from generating .pyc files in the container
|
||||||
|
ENV PYTHONDONTWRITEBYTECODE 1
|
||||||
|
# Turns off buffering for easier container logging
|
||||||
|
ENV PYTHONUNBUFFERED 1
|
||||||
|
# Don't fall back to legacy build system
|
||||||
|
ENV PIP_USE_PEP517=1
|
||||||
|
|
||||||
|
#######################
|
||||||
|
## build pyproject ##
|
||||||
|
#######################
|
||||||
|
FROM python-base AS pyproject-builder
|
||||||
|
|
||||||
|
# Install build dependencies
|
||||||
|
RUN \
|
||||||
|
--mount=type=cache,target=/var/cache/apt,sharing=locked \
|
||||||
|
--mount=type=cache,target=/var/lib/apt,sharing=locked \
|
||||||
|
apt-get update \
|
||||||
|
&& apt-get install -y \
|
||||||
|
--no-install-recommends \
|
||||||
|
build-essential=12.9 \
|
||||||
|
gcc=4:10.2.* \
|
||||||
|
python3-dev=3.9.*
|
||||||
|
|
||||||
|
# Prepare pip for buildkit cache
|
||||||
|
ARG PIP_CACHE_DIR=/var/cache/buildkit/pip
|
||||||
|
ENV PIP_CACHE_DIR ${PIP_CACHE_DIR}
|
||||||
|
RUN mkdir -p ${PIP_CACHE_DIR}
|
||||||
|
|
||||||
|
# Create virtual environment
|
||||||
|
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
|
||||||
|
python3 -m venv "${APPNAME}" \
|
||||||
|
--upgrade-deps
|
||||||
|
|
||||||
|
# Install requirements
|
||||||
|
COPY --link pyproject.toml .
|
||||||
|
COPY --link invokeai/version/invokeai_version.py invokeai/version/__init__.py invokeai/version/
|
||||||
|
ARG PIP_EXTRA_INDEX_URL
|
||||||
|
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
|
||||||
|
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
|
||||||
|
"${APPNAME}"/bin/pip install .
|
||||||
|
|
||||||
|
# Install pyproject.toml
|
||||||
|
COPY --link . .
|
||||||
|
RUN --mount=type=cache,target=${PIP_CACHE_DIR} \
|
||||||
|
"${APPNAME}/bin/pip" install .
|
||||||
|
|
||||||
|
# Build patchmatch
|
||||||
|
RUN python3 -c "from patchmatch import patch_match"
|
||||||
|
|
||||||
|
#####################
|
||||||
|
## runtime image ##
|
||||||
|
#####################
|
||||||
|
FROM python-base AS runtime
|
||||||
|
|
||||||
|
# Create a new user
|
||||||
|
ARG UNAME=appuser
|
||||||
|
RUN useradd \
|
||||||
|
--no-log-init \
|
||||||
|
-m \
|
||||||
|
-U \
|
||||||
|
"${UNAME}"
|
||||||
|
|
||||||
|
# Create volume directory
|
||||||
|
ARG VOLUME_DIR=/data
|
||||||
|
RUN mkdir -p "${VOLUME_DIR}" \
|
||||||
|
&& chown -hR "${UNAME}:${UNAME}" "${VOLUME_DIR}"
|
||||||
|
|
||||||
|
# Setup runtime environment
|
||||||
|
USER ${UNAME}:${UNAME}
|
||||||
|
COPY --chown=${UNAME}:${UNAME} --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPNAME}
|
||||||
|
ENV INVOKEAI_ROOT ${VOLUME_DIR}
|
||||||
|
ENV TRANSFORMERS_CACHE ${VOLUME_DIR}/.cache
|
||||||
|
ENV INVOKE_MODEL_RECONFIGURE "--yes --default_only"
|
||||||
|
EXPOSE 9090
|
||||||
|
ENTRYPOINT [ "invokeai" ]
|
||||||
|
CMD [ "--web", "--host", "0.0.0.0", "--port", "9090" ]
|
||||||
|
VOLUME [ "${VOLUME_DIR}" ]
|
51
docker/build.sh
Executable file
@ -0,0 +1,51 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# If you want to build a specific flavor, set the CONTAINER_FLAVOR environment variable
|
||||||
|
# e.g. CONTAINER_FLAVOR=cpu ./build.sh
|
||||||
|
# Possible Values are:
|
||||||
|
# - cpu
|
||||||
|
# - cuda
|
||||||
|
# - rocm
|
||||||
|
# Don't forget to also set it when executing run.sh
|
||||||
|
# if it is not set, the script will try to detect the flavor by itself.
|
||||||
|
#
|
||||||
|
# Doc can be found here:
|
||||||
|
# https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/
|
||||||
|
|
||||||
|
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
|
||||||
|
cd "$SCRIPTDIR" || exit 1
|
||||||
|
|
||||||
|
source ./env.sh
|
||||||
|
|
||||||
|
DOCKERFILE=${INVOKE_DOCKERFILE:-./Dockerfile}
|
||||||
|
|
||||||
|
# print the settings
|
||||||
|
echo -e "You are using these values:\n"
|
||||||
|
echo -e "Dockerfile:\t\t${DOCKERFILE}"
|
||||||
|
echo -e "index-url:\t\t${PIP_EXTRA_INDEX_URL:-none}"
|
||||||
|
echo -e "Volumename:\t\t${VOLUMENAME}"
|
||||||
|
echo -e "Platform:\t\t${PLATFORM}"
|
||||||
|
echo -e "Container Registry:\t${CONTAINER_REGISTRY}"
|
||||||
|
echo -e "Container Repository:\t${CONTAINER_REPOSITORY}"
|
||||||
|
echo -e "Container Tag:\t\t${CONTAINER_TAG}"
|
||||||
|
echo -e "Container Flavor:\t${CONTAINER_FLAVOR}"
|
||||||
|
echo -e "Container Image:\t${CONTAINER_IMAGE}\n"
|
||||||
|
|
||||||
|
# Create docker volume
|
||||||
|
if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
|
||||||
|
echo -e "Volume already exists\n"
|
||||||
|
else
|
||||||
|
echo -n "creating docker volume "
|
||||||
|
docker volume create "${VOLUMENAME}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Build Container
|
||||||
|
docker build \
|
||||||
|
--platform="${PLATFORM:-linux/amd64}" \
|
||||||
|
--tag="${CONTAINER_IMAGE:-invokeai}" \
|
||||||
|
${CONTAINER_FLAVOR:+--build-arg="CONTAINER_FLAVOR=${CONTAINER_FLAVOR}"} \
|
||||||
|
${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
|
||||||
|
${PIP_PACKAGE:+--build-arg="PIP_PACKAGE=${PIP_PACKAGE}"} \
|
||||||
|
--file="${DOCKERFILE}" \
|
||||||
|
..
|
54
docker/env.sh
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
# This file is used to set environment variables for the build.sh and run.sh scripts.
|
||||||
|
|
||||||
|
# Try to detect the container flavor if no PIP_EXTRA_INDEX_URL got specified
|
||||||
|
if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then
|
||||||
|
|
||||||
|
# Activate virtual environment if not already activated and exists
|
||||||
|
if [[ -z $VIRTUAL_ENV ]]; then
|
||||||
|
[[ -e "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" ]] \
|
||||||
|
&& source "$(dirname "${BASH_SOURCE[0]}")/../.venv/bin/activate" \
|
||||||
|
&& echo "Activated virtual environment: $VIRTUAL_ENV"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Decide which container flavor to build if not specified
|
||||||
|
if [[ -z "$CONTAINER_FLAVOR" ]] && python -c "import torch" &>/dev/null; then
|
||||||
|
# Check for CUDA and ROCm
|
||||||
|
CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
|
||||||
|
ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
|
||||||
|
if [[ "${CUDA_AVAILABLE}" == "True" ]]; then
|
||||||
|
CONTAINER_FLAVOR="cuda"
|
||||||
|
elif [[ "${ROCM_AVAILABLE}" == "True" ]]; then
|
||||||
|
CONTAINER_FLAVOR="rocm"
|
||||||
|
else
|
||||||
|
CONTAINER_FLAVOR="cpu"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Set PIP_EXTRA_INDEX_URL based on container flavor
|
||||||
|
if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
|
||||||
|
PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/rocm"
|
||||||
|
elif [[ "$CONTAINER_FLAVOR" == "cpu" ]]; then
|
||||||
|
PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu"
|
||||||
|
# elif [[ -z "$CONTAINER_FLAVOR" || "$CONTAINER_FLAVOR" == "cuda" ]]; then
|
||||||
|
# PIP_PACKAGE=${PIP_PACKAGE-".[xformers]"}
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Variables shared by build.sh and run.sh
|
||||||
|
REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
|
||||||
|
REPOSITORY_NAME="${REPOSITORY_NAME,,}"
|
||||||
|
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME}_data"}"
|
||||||
|
ARCH="${ARCH-$(uname -m)}"
|
||||||
|
PLATFORM="${PLATFORM-linux/${ARCH}}"
|
||||||
|
INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
|
||||||
|
CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
|
||||||
|
CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
|
||||||
|
CONTAINER_FLAVOR="${CONTAINER_FLAVOR-cuda}"
|
||||||
|
CONTAINER_TAG="${CONTAINER_TAG-"${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}"}"
|
||||||
|
CONTAINER_IMAGE="${CONTAINER_REGISTRY}/${CONTAINER_REPOSITORY}:${CONTAINER_TAG}"
|
||||||
|
CONTAINER_IMAGE="${CONTAINER_IMAGE,,}"
|
||||||
|
|
||||||
|
# enable docker buildkit
|
||||||
|
export DOCKER_BUILDKIT=1
|
41
docker/run.sh
Executable file
@ -0,0 +1,41 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# How to use: https://invoke-ai.github.io/InvokeAI/installation/040_INSTALL_DOCKER/
|
||||||
|
|
||||||
|
SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
|
||||||
|
cd "$SCRIPTDIR" || exit 1
|
||||||
|
|
||||||
|
source ./env.sh
|
||||||
|
|
||||||
|
# Create outputs directory if it does not exist
|
||||||
|
[[ -d ./outputs ]] || mkdir ./outputs
|
||||||
|
|
||||||
|
echo -e "You are using these values:\n"
|
||||||
|
echo -e "Volumename:\t${VOLUMENAME}"
|
||||||
|
echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"
|
||||||
|
echo -e "local Models:\t${MODELSPATH:-unset}\n"
|
||||||
|
|
||||||
|
docker run \
|
||||||
|
--interactive \
|
||||||
|
--tty \
|
||||||
|
--rm \
|
||||||
|
--platform="${PLATFORM}" \
|
||||||
|
--name="${REPOSITORY_NAME}" \
|
||||||
|
--hostname="${REPOSITORY_NAME}" \
|
||||||
|
--mount type=volume,volume-driver=local,source="${VOLUMENAME}",target=/data \
|
||||||
|
--mount type=bind,source="$(pwd)"/outputs/,target=/data/outputs/ \
|
||||||
|
${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
|
||||||
|
${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
|
||||||
|
--publish=9090:9090 \
|
||||||
|
--cap-add=sys_nice \
|
||||||
|
${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
|
||||||
|
"${CONTAINER_IMAGE}" ${@:+$@}
|
||||||
|
|
||||||
|
echo -e "\nCleaning trash folder ..."
|
||||||
|
for f in outputs/.Trash*; do
|
||||||
|
if [ -e "$f" ]; then
|
||||||
|
rm -Rf "$f"
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
done
|
@ -261,7 +261,7 @@ sections describe what's new for InvokeAI.
|
|||||||
[Installation](installation/index.md).
|
[Installation](installation/index.md).
|
||||||
- A streamlined manual installation process that works for both Conda and
|
- A streamlined manual installation process that works for both Conda and
|
||||||
PIP-only installs. See
|
PIP-only installs. See
|
||||||
[Manual Installation](installation/INSTALL_MANUAL.md).
|
[Manual Installation](installation/020_INSTALL_MANUAL.md).
|
||||||
- The ability to save frequently-used startup options (model to load, steps,
|
- The ability to save frequently-used startup options (model to load, steps,
|
||||||
sampler, etc) in a `.invokeai` file. See
|
sampler, etc) in a `.invokeai` file. See
|
||||||
[Client](features/CLI.md)
|
[Client](features/CLI.md)
|
||||||
|
BIN
docs/assets/contributing/html-detail.png
Normal file
After Width: | Height: | Size: 470 KiB |
BIN
docs/assets/contributing/html-overview.png
Normal file
After Width: | Height: | Size: 457 KiB |
BIN
docs/assets/installer-walkthrough/choose-gpu.png
Normal file
After Width: | Height: | Size: 26 KiB |
BIN
docs/assets/installer-walkthrough/confirm-directory.png
Normal file
After Width: | Height: | Size: 84 KiB |
BIN
docs/assets/installer-walkthrough/downloading-models.png
Normal file
After Width: | Height: | Size: 37 KiB |
BIN
docs/assets/installer-walkthrough/installing-models.png
Normal file
After Width: | Height: | Size: 128 KiB |
BIN
docs/assets/installer-walkthrough/settings-form.png
Normal file
After Width: | Height: | Size: 114 KiB |
BIN
docs/assets/installer-walkthrough/unpacked-zipfile.png
Normal file
After Width: | Height: | Size: 56 KiB |
BIN
docs/assets/installing-models/webui-models-1.png
Normal file
After Width: | Height: | Size: 98 KiB |
BIN
docs/assets/installing-models/webui-models-2.png
Normal file
After Width: | Height: | Size: 94 KiB |
BIN
docs/assets/installing-models/webui-models-3.png
Normal file
After Width: | Height: | Size: 99 KiB |
BIN
docs/assets/installing-models/webui-models-4.png
Normal file
After Width: | Height: | Size: 98 KiB |
93
docs/contributing/ARCHITECTURE.md
Normal file
@ -0,0 +1,93 @@
|
|||||||
|
# Invoke.AI Architecture
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart TB
|
||||||
|
|
||||||
|
subgraph apps[Applications]
|
||||||
|
webui[WebUI]
|
||||||
|
cli[CLI]
|
||||||
|
|
||||||
|
subgraph webapi[Web API]
|
||||||
|
api[HTTP API]
|
||||||
|
sio[Socket.IO]
|
||||||
|
end
|
||||||
|
|
||||||
|
end
|
||||||
|
|
||||||
|
subgraph invoke[Invoke]
|
||||||
|
direction LR
|
||||||
|
invoker
|
||||||
|
services
|
||||||
|
sessions
|
||||||
|
invocations
|
||||||
|
end
|
||||||
|
|
||||||
|
subgraph core[AI Core]
|
||||||
|
Generate
|
||||||
|
end
|
||||||
|
|
||||||
|
webui --> webapi
|
||||||
|
webapi --> invoke
|
||||||
|
cli --> invoke
|
||||||
|
|
||||||
|
invoker --> services & sessions
|
||||||
|
invocations --> services
|
||||||
|
sessions --> invocations
|
||||||
|
|
||||||
|
services --> core
|
||||||
|
|
||||||
|
%% Styles
|
||||||
|
classDef sg fill:#5028C8,font-weight:bold,stroke-width:2,color:#fff,stroke:#14141A
|
||||||
|
classDef default stroke-width:2px,stroke:#F6B314,color:#fff,fill:#14141A
|
||||||
|
|
||||||
|
class apps,webapi,invoke,core sg
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
## Applications
|
||||||
|
|
||||||
|
Applications are built on top of the invoke framework. They should construct `invoker` and then interact through it. They should avoid interacting directly with core code in order to support a variety of configurations.
|
||||||
|
|
||||||
|
### Web UI
|
||||||
|
|
||||||
|
The Web UI is built on top of an HTTP API built with [FastAPI](https://fastapi.tiangolo.com/) and [Socket.IO](https://socket.io/). The frontend code is found in `/frontend` and the backend code is found in `/ldm/invoke/app/api_app.py` and `/ldm/invoke/app/api/`. The code is further organized as such:
|
||||||
|
|
||||||
|
| Component | Description |
|
||||||
|
| --- | --- |
|
||||||
|
| api_app.py | Sets up the API app, annotates the OpenAPI spec with additional data, and runs the API |
|
||||||
|
| dependencies | Creates all invoker services and the invoker, and provides them to the API |
|
||||||
|
| events | An eventing system that could in the future be adapted to support horizontal scale-out |
|
||||||
|
| sockets | The Socket.IO interface - handles listening to and emitting session events (events are defined in the events service module) |
|
||||||
|
| routers | API definitions for different areas of API functionality |
|
||||||
|
|
||||||
|
### CLI
|
||||||
|
|
||||||
|
The CLI is built automatically from invocation metadata, and also supports invocation piping and auto-linking. Code is available in `/ldm/invoke/app/cli_app.py`.
|
||||||
|
|
||||||
|
## Invoke
|
||||||
|
|
||||||
|
The Invoke framework provides the interface to the underlying AI systems and is built with flexibility and extensibility in mind. There are four major concepts: invoker, sessions, invocations, and services.
|
||||||
|
|
||||||
|
### Invoker
|
||||||
|
|
||||||
|
The invoker (`/ldm/invoke/app/services/invoker.py`) is the primary interface through which applications interact with the framework. Its primary purpose is to create, manage, and invoke sessions. It also maintains two sets of services:
|
||||||
|
- **invocation services**, which are used by invocations to interact with core functionality.
|
||||||
|
- **invoker services**, which are used by the invoker to manage sessions and manage the invocation queue.
|
||||||
|
|
||||||
|
### Sessions
|
||||||
|
|
||||||
|
Invocations and links between them form a graph, which is maintained in a session. Sessions can be queued for invocation, which will execute their graph (either the next ready invocation, or all invocations). Sessions also maintain execution history for the graph (including storage of any outputs). An invocation may be added to a session at any time, and there is capability to add and entire graph at once, as well as to automatically link new invocations to previous invocations. Invocations can not be deleted or modified once added.
|
||||||
|
|
||||||
|
The session graph does not support looping. This is left as an application problem to prevent additional complexity in the graph.
|
||||||
|
|
||||||
|
### Invocations
|
||||||
|
|
||||||
|
Invocations represent individual units of execution, with inputs and outputs. All invocations are located in `/ldm/invoke/app/invocations`, and are all automatically discovered and made available in the applications. These are the primary way to expose new functionality in Invoke.AI, and the [implementation guide](INVOCATIONS.md) explains how to add new invocations.
|
||||||
|
|
||||||
|
### Services
|
||||||
|
|
||||||
|
Services provide invocations access AI Core functionality and other necessary functionality (e.g. image storage). These are available in `/ldm/invoke/app/services`. As a general rule, new services should provide an interface as an abstract base class, and may provide a lightweight local implementation by default in their module. The goal for all services should be to enable the usage of different implementations (e.g. using cloud storage for image storage), but should not load any module dependencies unless that implementation has been used (i.e. don't import anything that won't be used, especially if it's expensive to import).
|
||||||
|
|
||||||
|
## AI Core
|
||||||
|
|
||||||
|
The AI Core is represented by the rest of the code base (i.e. the code outside of `/ldm/invoke/app/`).
|
105
docs/contributing/INVOCATIONS.md
Normal file
@ -0,0 +1,105 @@
|
|||||||
|
# Invocations
|
||||||
|
|
||||||
|
Invocations represent a single operation, its inputs, and its outputs. These operations and their outputs can be chained together to generate and modify images.
|
||||||
|
|
||||||
|
## Creating a new invocation
|
||||||
|
|
||||||
|
To create a new invocation, either find the appropriate module file in `/ldm/invoke/app/invocations` to add your invocation to, or create a new one in that folder. All invocations in that folder will be discovered and made available to the CLI and API automatically. Invocations make use of [typing](https://docs.python.org/3/library/typing.html) and [pydantic](https://pydantic-docs.helpmanual.io/) for validation and integration into the CLI and API.
|
||||||
|
|
||||||
|
An invocation looks like this:
|
||||||
|
|
||||||
|
```py
|
||||||
|
class UpscaleInvocation(BaseInvocation):
|
||||||
|
"""Upscales an image."""
|
||||||
|
type: Literal['upscale'] = 'upscale'
|
||||||
|
|
||||||
|
# Inputs
|
||||||
|
image: Union[ImageField,None] = Field(description="The input image")
|
||||||
|
strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
|
||||||
|
level: Literal[2,4] = Field(default=2, description = "The upscale level")
|
||||||
|
|
||||||
|
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||||
|
image = context.services.images.get(self.image.image_type, self.image.image_name)
|
||||||
|
results = context.services.generate.upscale_and_reconstruct(
|
||||||
|
image_list = [[image, 0]],
|
||||||
|
upscale = (self.level, self.strength),
|
||||||
|
strength = 0.0, # GFPGAN strength
|
||||||
|
save_original = False,
|
||||||
|
image_callback = None,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Results are image and seed, unwrap for now
|
||||||
|
# TODO: can this return multiple results?
|
||||||
|
image_type = ImageType.RESULT
|
||||||
|
image_name = context.services.images.create_name(context.graph_execution_state_id, self.id)
|
||||||
|
context.services.images.save(image_type, image_name, results[0][0])
|
||||||
|
return ImageOutput(
|
||||||
|
image = ImageField(image_type = image_type, image_name = image_name)
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
Each portion is important to implement correctly.
|
||||||
|
|
||||||
|
### Class definition and type
|
||||||
|
```py
|
||||||
|
class UpscaleInvocation(BaseInvocation):
|
||||||
|
"""Upscales an image."""
|
||||||
|
type: Literal['upscale'] = 'upscale'
|
||||||
|
```
|
||||||
|
All invocations must derive from `BaseInvocation`. They should have a docstring that declares what they do in a single, short line. They should also have a `type` with a type hint that's `Literal["command_name"]`, where `command_name` is what the user will type on the CLI or use in the API to create this invocation. The `command_name` must be unique. The `type` must be assigned to the value of the literal in the type hint.
|
||||||
|
|
||||||
|
### Inputs
|
||||||
|
```py
|
||||||
|
# Inputs
|
||||||
|
image: Union[ImageField,None] = Field(description="The input image")
|
||||||
|
strength: float = Field(default=0.75, gt=0, le=1, description="The strength")
|
||||||
|
level: Literal[2,4] = Field(default=2, description="The upscale level")
|
||||||
|
```
|
||||||
|
Inputs consist of three parts: a name, a type hint, and a `Field` with default, description, and validation information. For example:
|
||||||
|
| Part | Value | Description |
|
||||||
|
| ---- | ----- | ----------- |
|
||||||
|
| Name | `strength` | This field is referred to as `strength` |
|
||||||
|
| Type Hint | `float` | This field must be of type `float` |
|
||||||
|
| Field | `Field(default=0.75, gt=0, le=1, description="The strength")` | The default value is `0.75`, the value must be in the range (0,1], and help text will show "The strength" for this field. |
|
||||||
|
|
||||||
|
Notice that `image` has type `Union[ImageField,None]`. The `Union` allows this field to be parsed with `None` as a value, which enables linking to previous invocations. All fields should either provide a default value or allow `None` as a value, so that they can be overwritten with a linked output from another invocation.
|
||||||
|
|
||||||
|
The special type `ImageField` is also used here. All images are passed as `ImageField`, which protects them from pydantic validation errors (since images only ever come from links).
|
||||||
|
|
||||||
|
Finally, note that for all linking, the `type` of the linked fields must match. If the `name` also matches, then the field can be **automatically linked** to a previous invocation by name and matching.
|
||||||
|
|
||||||
|
### Invoke Function
|
||||||
|
```py
|
||||||
|
def invoke(self, context: InvocationContext) -> ImageOutput:
|
||||||
|
image = context.services.images.get(self.image.image_type, self.image.image_name)
|
||||||
|
results = context.services.generate.upscale_and_reconstruct(
|
||||||
|
image_list = [[image, 0]],
|
||||||
|
upscale = (self.level, self.strength),
|
||||||
|
strength = 0.0, # GFPGAN strength
|
||||||
|
save_original = False,
|
||||||
|
image_callback = None,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Results are image and seed, unwrap for now
|
||||||
|
image_type = ImageType.RESULT
|
||||||
|
image_name = context.services.images.create_name(context.graph_execution_state_id, self.id)
|
||||||
|
context.services.images.save(image_type, image_name, results[0][0])
|
||||||
|
return ImageOutput(
|
||||||
|
image = ImageField(image_type = image_type, image_name = image_name)
|
||||||
|
)
|
||||||
|
```
|
||||||
|
The `invoke` function is the last portion of an invocation. It is provided an `InvocationContext` which contains services to perform work as well as a `session_id` for use as needed. It should return a class with output values that derives from `BaseInvocationOutput`.
|
||||||
|
|
||||||
|
Before being called, the invocation will have all of its fields set from defaults, inputs, and finally links (overriding in that order).
|
||||||
|
|
||||||
|
Assume that this invocation may be running simultaneously with other invocations, may be running on another machine, or in other interesting scenarios. If you need functionality, please provide it as a service in the `InvocationServices` class, and make sure it can be overridden.
|
||||||
|
|
||||||
|
### Outputs
|
||||||
|
```py
|
||||||
|
class ImageOutput(BaseInvocationOutput):
|
||||||
|
"""Base class for invocations that output an image"""
|
||||||
|
type: Literal['image'] = 'image'
|
||||||
|
|
||||||
|
image: ImageField = Field(default=None, description="The output image")
|
||||||
|
```
|
||||||
|
Output classes look like an invocation class without the invoke method. Prefer to use an existing output class if available, and prefer to name inputs the same as outputs when possible, to promote automatic invocation linking.
|
83
docs/contributing/LOCAL_DEVELOPMENT.md
Normal file
@ -0,0 +1,83 @@
|
|||||||
|
# Local Development
|
||||||
|
|
||||||
|
If you are looking to contribute you will need to have a local development
|
||||||
|
environment. See the
|
||||||
|
[Developer Install](../installation/020_INSTALL_MANUAL.md#developer-install) for
|
||||||
|
full details.
|
||||||
|
|
||||||
|
Broadly this involves cloning the repository, installing the pre-reqs, and
|
||||||
|
installing InvokeAI (in editable form). Assuming this is working, choose your area of
|
||||||
|
focus.
|
||||||
|
|
||||||
|
## Documentation
|
||||||
|
|
||||||
|
We use [mkdocs](https://www.mkdocs.org) for our documentation with the
|
||||||
|
[material theme](https://squidfunk.github.io/mkdocs-material/). Documentation is
|
||||||
|
written in markdown files under the `./docs` folder and then built into a static
|
||||||
|
website for hosting with GitHub Pages at
|
||||||
|
[invoke-ai.github.io/InvokeAI](https://invoke-ai.github.io/InvokeAI).
|
||||||
|
|
||||||
|
To contribute to the documentation you'll need to install the dependencies. Note
|
||||||
|
the use of `"`.
|
||||||
|
|
||||||
|
```zsh
|
||||||
|
pip install ".[docs]"
|
||||||
|
```
|
||||||
|
|
||||||
|
Now, run the documentation locally with hot-reloading for changes made:
|
||||||
|
|
||||||
|
```zsh
|
||||||
|
mkdocs serve
|
||||||
|
```
|
||||||
|
|
||||||
|
You'll then be prompted to connect to `http://127.0.0.1:8080` in order to
|
||||||
|
access.
|
||||||
|
|
||||||
|
## Backend
|
||||||
|
|
||||||
|
The backend is contained within the `./invokeai/backend` folder structure. To
|
||||||
|
get started however please install the development dependencies.
|
||||||
|
|
||||||
|
From the root of the repository run the following command. Note the use of `"`.
|
||||||
|
|
||||||
|
```zsh
|
||||||
|
pip install ".[test]"
|
||||||
|
```
|
||||||
|
|
||||||
|
This is an optional group of packages which is defined within the
|
||||||
|
`pyproject.toml` and will be required for testing the changes you make to the
|
||||||
|
code.
|
||||||
|
|
||||||
|
### Running Tests
|
||||||
|
|
||||||
|
We use [pytest](https://docs.pytest.org/en/7.2.x/) for our test suite. Tests can
|
||||||
|
be found under the `./tests` folder and can be run with a single `pytest`
|
||||||
|
command. Optionally, to review test coverage you can append `--cov`.
|
||||||
|
|
||||||
|
```zsh
|
||||||
|
pytest --cov
|
||||||
|
```
|
||||||
|
|
||||||
|
Test outcomes and coverage will be reported in the terminal. In addition a more
|
||||||
|
detailed report is created in both XML and HTML format in the `./coverage`
|
||||||
|
folder. The HTML one in particular can help identify missing statements
|
||||||
|
requiring tests to ensure coverage. This can be run by opening
|
||||||
|
`./coverage/html/index.html`.
|
||||||
|
|
||||||
|
For example.
|
||||||
|
|
||||||
|
```zsh
|
||||||
|
pytest --cov; open ./coverage/html/index.html
|
||||||
|
```
|
||||||
|
|
||||||
|
??? info "HTML coverage report output"
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
## Front End
|
||||||
|
|
||||||
|
<!--#TODO: get input from blessedcoolant here, for the moment inserted the frontend README via snippets extension.-->
|
||||||
|
|
||||||
|
--8<-- "invokeai/frontend/web/README.md"
|
@ -6,38 +6,51 @@ title: Command-Line Interface
|
|||||||
|
|
||||||
## **Interactive Command Line Interface**
|
## **Interactive Command Line Interface**
|
||||||
|
|
||||||
The `invoke.py` script, located in `scripts/`, provides an interactive interface
|
The InvokeAI command line interface (CLI) provides scriptable access
|
||||||
to image generation similar to the "invoke mothership" bot that Stable AI
|
to InvokeAI's features. Some advanced features are only available
|
||||||
provided on its Discord server.
|
through the CLI, though they eventually find their way into the WebUI.
|
||||||
|
|
||||||
Unlike the `txt2img.py` and `img2img.py` scripts provided in the original
|
The CLI is accessible from the `invoke.sh`/`invoke.bat` launcher by
|
||||||
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion) source
|
selecting option (1). Alternatively, it can be launched directly from
|
||||||
code repository, the time-consuming initialization of the AI model
|
the command line by activating the InvokeAI environment and giving the
|
||||||
initialization only happens once. After that image generation from the
|
command:
|
||||||
command-line interface is very fast.
|
|
||||||
|
```bash
|
||||||
|
invokeai
|
||||||
|
```
|
||||||
|
|
||||||
|
After some startup messages, you will be presented with the `invoke> `
|
||||||
|
prompt. Here you can type prompts to generate images and issue other
|
||||||
|
commands to load and manipulate generative models. The CLI has a large
|
||||||
|
number of command-line options that control its behavior. To get a
|
||||||
|
concise summary of the options, call `invokeai` with the `--help` argument:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
invokeai --help
|
||||||
|
```
|
||||||
|
|
||||||
The script uses the readline library to allow for in-line editing, command
|
The script uses the readline library to allow for in-line editing, command
|
||||||
history (++up++ and ++down++), autocompletion, and more. To help keep track of
|
history (++up++ and ++down++), autocompletion, and more. To help keep track of
|
||||||
which prompts generated which images, the script writes a log file of image
|
which prompts generated which images, the script writes a log file of image
|
||||||
names and prompts to the selected output directory.
|
names and prompts to the selected output directory.
|
||||||
|
|
||||||
In addition, as of version 1.02, it also writes the prompt into the PNG file's
|
Here is a typical session
|
||||||
metadata where it can be retrieved using `scripts/images2prompt.py`
|
|
||||||
|
|
||||||
The script is confirmed to work on Linux, Windows and Mac systems.
|
|
||||||
|
|
||||||
!!! note
|
|
||||||
|
|
||||||
This script runs from the command-line or can be used as a Web application. The Web GUI is
|
|
||||||
currently rudimentary, but a much better replacement is on its way.
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
(invokeai) ~/stable-diffusion$ python3 ./scripts/invoke.py
|
PS1:C:\Users\fred> invokeai
|
||||||
* Initializing, be patient...
|
* Initializing, be patient...
|
||||||
Loading model from models/ldm/text2img-large/model.ckpt
|
* Initializing, be patient...
|
||||||
|
>> Initialization file /home/lstein/invokeai/invokeai.init found. Loading...
|
||||||
|
>> Internet connectivity is True
|
||||||
|
>> InvokeAI, version 2.3.0-rc5
|
||||||
|
>> InvokeAI runtime directory is "/home/lstein/invokeai"
|
||||||
|
>> GFPGAN Initialized
|
||||||
|
>> CodeFormer Initialized
|
||||||
|
>> ESRGAN Initialized
|
||||||
|
>> Using device_type cuda
|
||||||
|
>> xformers memory-efficient attention is available and enabled
|
||||||
(...more initialization messages...)
|
(...more initialization messages...)
|
||||||
|
* Initialization done! Awaiting your command (-h for help, 'q' to quit)
|
||||||
* Initialization done! Awaiting your command...
|
|
||||||
invoke> ashley judd riding a camel -n2 -s150
|
invoke> ashley judd riding a camel -n2 -s150
|
||||||
Outputs:
|
Outputs:
|
||||||
outputs/img-samples/00009.png: "ashley judd riding a camel" -n2 -s150 -S 416354203
|
outputs/img-samples/00009.png: "ashley judd riding a camel" -n2 -s150 -S 416354203
|
||||||
@ -47,27 +60,15 @@ invoke> "there's a fly in my soup" -n6 -g
|
|||||||
outputs/img-samples/00011.png: "there's a fly in my soup" -n6 -g -S 2685670268
|
outputs/img-samples/00011.png: "there's a fly in my soup" -n6 -g -S 2685670268
|
||||||
seeds for individual rows: [2685670268, 1216708065, 2335773498, 822223658, 714542046, 3395302430]
|
seeds for individual rows: [2685670268, 1216708065, 2335773498, 822223658, 714542046, 3395302430]
|
||||||
invoke> q
|
invoke> q
|
||||||
|
|
||||||
# this shows how to retrieve the prompt stored in the saved image's metadata
|
|
||||||
(invokeai) ~/stable-diffusion$ python ./scripts/images2prompt.py outputs/img_samples/*.png
|
|
||||||
00009.png: "ashley judd riding a camel" -s150 -S 416354203
|
|
||||||
00010.png: "ashley judd riding a camel" -s150 -S 1362479620
|
|
||||||
00011.png: "there's a fly in my soup" -n6 -g -S 2685670268
|
|
||||||
```
|
```
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
The `invoke>` prompt's arguments are pretty much identical to those used in the
|
|
||||||
Discord bot, except you don't need to type `!invoke` (it doesn't hurt if you
|
|
||||||
do). A significant change is that creation of individual images is now the
|
|
||||||
default unless `--grid` (`-g`) is given. A full list is given in
|
|
||||||
[List of prompt arguments](#list-of-prompt-arguments).
|
|
||||||
|
|
||||||
## Arguments
|
## Arguments
|
||||||
|
|
||||||
The script itself also recognizes a series of command-line switches that will
|
The script recognizes a series of command-line switches that will
|
||||||
change important global defaults, such as the directory for image outputs and
|
change important global defaults, such as the directory for image
|
||||||
the location of the model weight files.
|
outputs and the location of the model weight files.
|
||||||
|
|
||||||
### List of arguments recognized at the command line
|
### List of arguments recognized at the command line
|
||||||
|
|
||||||
@ -82,10 +83,14 @@ overridden on a per-prompt basis (see
|
|||||||
| `--outdir <path>` | `-o<path>` | `outputs/img_samples` | Location for generated images. |
|
| `--outdir <path>` | `-o<path>` | `outputs/img_samples` | Location for generated images. |
|
||||||
| `--prompt_as_dir` | `-p` | `False` | Name output directories using the prompt text. |
|
| `--prompt_as_dir` | `-p` | `False` | Name output directories using the prompt text. |
|
||||||
| `--from_file <path>` | | `None` | Read list of prompts from a file. Use `-` to read from standard input |
|
| `--from_file <path>` | | `None` | Read list of prompts from a file. Use `-` to read from standard input |
|
||||||
| `--model <modelname>` | | `stable-diffusion-1.4` | Loads model specified in configs/models.yaml. Currently one of "stable-diffusion-1.4" or "laion400m" |
|
| `--model <modelname>` | | `stable-diffusion-1.5` | Loads the initial model specified in configs/models.yaml. |
|
||||||
| `--full_precision` | `-F` | `False` | Run in slower full-precision mode. Needed for Macintosh M1/M2 hardware and some older video cards. |
|
| `--ckpt_convert ` | | `False` | If provided both .ckpt and .safetensors files will be auto-converted into diffusers format in memory |
|
||||||
|
| `--autoconvert <path>` | | `None` | On startup, scan the indicated directory for new .ckpt/.safetensor files and automatically convert and import them |
|
||||||
|
| `--precision` | | `fp16` | Provide `fp32` for full precision mode, `fp16` for half-precision. `fp32` needed for Macintoshes and some NVidia cards. |
|
||||||
| `--png_compression <0-9>` | `-z<0-9>` | `6` | Select level of compression for output files, from 0 (no compression) to 9 (max compression) |
|
| `--png_compression <0-9>` | `-z<0-9>` | `6` | Select level of compression for output files, from 0 (no compression) to 9 (max compression) |
|
||||||
| `--safety-checker` | | `False` | Activate safety checker for NSFW and other potentially disturbing imagery |
|
| `--safety-checker` | | `False` | Activate safety checker for NSFW and other potentially disturbing imagery |
|
||||||
|
| `--patchmatch`, `--no-patchmatch` | | `--patchmatch` | Load/Don't load the PatchMatch inpainting extension |
|
||||||
|
| `--xformers`, `--no-xformers` | | `--xformers` | Load/Don't load the Xformers memory-efficient attention module (CUDA only) |
|
||||||
| `--web` | | `False` | Start in web server mode |
|
| `--web` | | `False` | Start in web server mode |
|
||||||
| `--host <ip addr>` | | `localhost` | Which network interface web server should listen on. Set to 0.0.0.0 to listen on any. |
|
| `--host <ip addr>` | | `localhost` | Which network interface web server should listen on. Set to 0.0.0.0 to listen on any. |
|
||||||
| `--port <port>` | | `9090` | Which port web server should listen for requests on. |
|
| `--port <port>` | | `9090` | Which port web server should listen for requests on. |
|
||||||
@ -109,6 +114,7 @@ overridden on a per-prompt basis (see
|
|||||||
|
|
||||||
| Argument | Shortcut | Default | Description |
|
| Argument | Shortcut | Default | Description |
|
||||||
|--------------------|------------|---------------------|--------------|
|
|--------------------|------------|---------------------|--------------|
|
||||||
|
| `--full_precision` | | `False` | Same as `--precision=fp32`|
|
||||||
| `--weights <path>` | | `None` | Path to weights file; use `--model stable-diffusion-1.4` instead |
|
| `--weights <path>` | | `None` | Path to weights file; use `--model stable-diffusion-1.4` instead |
|
||||||
| `--laion400m` | `-l` | `False` | Use older LAION400m weights; use `--model=laion400m` instead |
|
| `--laion400m` | `-l` | `False` | Use older LAION400m weights; use `--model=laion400m` instead |
|
||||||
|
|
||||||
@ -208,6 +214,8 @@ Here are the invoke> command that apply to txt2img:
|
|||||||
| `--variation <float>` | `-v<float>` | `0.0` | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with `-S<seed>` and `-n<int>` to generate a series of riffs on a starting image. See [Variations](./VARIATIONS.md). |
|
| `--variation <float>` | `-v<float>` | `0.0` | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with `-S<seed>` and `-n<int>` to generate a series of riffs on a starting image. See [Variations](./VARIATIONS.md). |
|
||||||
| `--with_variations <pattern>` | | `None` | Combine two or more variations. See [Variations](./VARIATIONS.md) for how to use this. |
|
| `--with_variations <pattern>` | | `None` | Combine two or more variations. See [Variations](./VARIATIONS.md) for how to use this. |
|
||||||
| `--save_intermediates <n>` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory |
|
| `--save_intermediates <n>` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory |
|
||||||
|
| `--h_symmetry_time_pct <float>` | | `None` | Create symmetry along the X axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) |
|
||||||
|
| `--v_symmetry_time_pct <float>` | | `None` | Create symmetry along the Y axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) |
|
||||||
|
|
||||||
!!! note
|
!!! note
|
||||||
|
|
||||||
@ -336,8 +344,10 @@ useful for debugging the text masking process prior to inpainting with the
|
|||||||
|
|
||||||
### Model selection and importation
|
### Model selection and importation
|
||||||
|
|
||||||
The CLI allows you to add new models on the fly, as well as to switch among them
|
The CLI allows you to add new models on the fly, as well as to switch
|
||||||
rapidly without leaving the script.
|
among them rapidly without leaving the script. There are several
|
||||||
|
different model formats, each described in the [Model Installation
|
||||||
|
Guide](../installation/050_INSTALLING_MODELS.md).
|
||||||
|
|
||||||
#### `!models`
|
#### `!models`
|
||||||
|
|
||||||
@ -347,9 +357,9 @@ model is bold-faced
|
|||||||
Example:
|
Example:
|
||||||
|
|
||||||
<pre>
|
<pre>
|
||||||
laion400m not loaded <no description>
|
inpainting-1.5 not loaded Stable Diffusion inpainting model
|
||||||
<b>stable-diffusion-1.4 active Stable Diffusion v1.4</b>
|
<b>stable-diffusion-1.5 active Stable Diffusion v1.5</b>
|
||||||
waifu-diffusion not loaded Waifu Diffusion v1.3
|
waifu-diffusion not loaded Waifu Diffusion v1.4
|
||||||
</pre>
|
</pre>
|
||||||
|
|
||||||
#### `!switch <model>`
|
#### `!switch <model>`
|
||||||
@ -361,43 +371,30 @@ Note how the second column of the `!models` table changes to `cached` after a
|
|||||||
model is first loaded, and that the long initialization step is not needed when
|
model is first loaded, and that the long initialization step is not needed when
|
||||||
loading a cached model.
|
loading a cached model.
|
||||||
|
|
||||||
<pre>
|
#### `!import_model <hugging_face_repo_ID>`
|
||||||
invoke> !models
|
|
||||||
laion400m not loaded <no description>
|
|
||||||
<b>stable-diffusion-1.4 cached Stable Diffusion v1.4</b>
|
|
||||||
waifu-diffusion active Waifu Diffusion v1.3
|
|
||||||
|
|
||||||
invoke> !switch waifu-diffusion
|
This imports and installs a `diffusers`-style model that is stored on
|
||||||
>> Caching model stable-diffusion-1.4 in system RAM
|
the [HuggingFace Web Site](https://huggingface.co). You can look up
|
||||||
>> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
|
any [Stable Diffusion diffusers
|
||||||
| LatentDiffusion: Running in eps-prediction mode
|
model](https://huggingface.co/models?library=diffusers) and install it
|
||||||
| DiffusionWrapper has 859.52 M params.
|
with a command like the following:
|
||||||
| Making attention of type 'vanilla' with 512 in_channels
|
|
||||||
| Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
|
|
||||||
| Making attention of type 'vanilla' with 512 in_channels
|
|
||||||
| Using faster float16 precision
|
|
||||||
>> Model loaded in 18.24s
|
|
||||||
>> Max VRAM used to load the model: 2.17G
|
|
||||||
>> Current VRAM usage:2.17G
|
|
||||||
>> Setting Sampler to k_lms
|
|
||||||
|
|
||||||
invoke> !models
|
```bash
|
||||||
laion400m not loaded <no description>
|
!import_model prompthero/openjourney
|
||||||
stable-diffusion-1.4 cached Stable Diffusion v1.4
|
```
|
||||||
<b>waifu-diffusion active Waifu Diffusion v1.3</b>
|
|
||||||
|
|
||||||
invoke> !switch stable-diffusion-1.4
|
#### `!import_model <path/to/diffusers/directory>`
|
||||||
>> Caching model waifu-diffusion in system RAM
|
|
||||||
>> Retrieving model stable-diffusion-1.4 from system RAM cache
|
|
||||||
>> Setting Sampler to k_lms
|
|
||||||
|
|
||||||
invoke> !models
|
If you have a copy of a `diffusers`-style model saved to disk, you can
|
||||||
laion400m not loaded <no description>
|
import it by passing the path to model's top-level directory.
|
||||||
<b>stable-diffusion-1.4 active Stable Diffusion v1.4</b>
|
|
||||||
waifu-diffusion cached Waifu Diffusion v1.3
|
|
||||||
</pre>
|
|
||||||
|
|
||||||
#### `!import_model <path/to/model/weights>`
|
#### `!import_model <url>`
|
||||||
|
|
||||||
|
For a `.ckpt` or `.safetensors` file, if you have a direct download
|
||||||
|
URL for the file, you can provide it to `!import_model` and the file
|
||||||
|
will be downloaded and installed for you.
|
||||||
|
|
||||||
|
#### `!import_model <path/to/model/weights.ckpt>`
|
||||||
|
|
||||||
This command imports a new model weights file into InvokeAI, makes it available
|
This command imports a new model weights file into InvokeAI, makes it available
|
||||||
for image generation within the script, and writes out the configuration for the
|
for image generation within the script, and writes out the configuration for the
|
||||||
@ -417,35 +414,12 @@ below, the bold-faced text shows what the user typed in with the exception of
|
|||||||
the width, height and configuration file paths, which were filled in
|
the width, height and configuration file paths, which were filled in
|
||||||
automatically.
|
automatically.
|
||||||
|
|
||||||
Example:
|
#### `!import_model <path/to/directory_of_models>`
|
||||||
|
|
||||||
<pre>
|
If you provide the path of a directory that contains one or more
|
||||||
invoke> <b>!import_model models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt</b>
|
`.ckpt` or `.safetensors` files, the CLI will scan the directory and
|
||||||
>> Model import in process. Please enter the values needed to configure this model:
|
interactively offer to import the models it finds there. Also see the
|
||||||
|
`--autoconvert` command-line option.
|
||||||
Name for this model: <b>waifu-diffusion</b>
|
|
||||||
Description of this model: <b>Waifu Diffusion v1.3</b>
|
|
||||||
Configuration file for this model: <b>configs/stable-diffusion/v1-inference.yaml</b>
|
|
||||||
Default image width: <b>512</b>
|
|
||||||
Default image height: <b>512</b>
|
|
||||||
>> New configuration:
|
|
||||||
waifu-diffusion:
|
|
||||||
config: configs/stable-diffusion/v1-inference.yaml
|
|
||||||
description: Waifu Diffusion v1.3
|
|
||||||
height: 512
|
|
||||||
weights: models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
|
|
||||||
width: 512
|
|
||||||
OK to import [n]? <b>y</b>
|
|
||||||
>> Caching model stable-diffusion-1.4 in system RAM
|
|
||||||
>> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
|
|
||||||
| LatentDiffusion: Running in eps-prediction mode
|
|
||||||
| DiffusionWrapper has 859.52 M params.
|
|
||||||
| Making attention of type 'vanilla' with 512 in_channels
|
|
||||||
| Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
|
|
||||||
| Making attention of type 'vanilla' with 512 in_channels
|
|
||||||
| Using faster float16 precision
|
|
||||||
invoke>
|
|
||||||
</pre>
|
|
||||||
|
|
||||||
#### `!edit_model <name_of_model>`
|
#### `!edit_model <name_of_model>`
|
||||||
|
|
||||||
@ -479,11 +453,6 @@ OK to import [n]? y
|
|||||||
...
|
...
|
||||||
</pre>
|
</pre>
|
||||||
|
|
||||||
======= invoke> !fix 000017.4829112.gfpgan-00.png --embiggen 3 ...lots of
|
|
||||||
text... Outputs: [2] outputs/img-samples/000018.2273800735.embiggen-00.png: !fix
|
|
||||||
"outputs/img-samples/000017.243781548.gfpgan-00.png" -s 50 -S 2273800735 -W 512
|
|
||||||
-H 512 -C 7.5 -A k_lms --embiggen 3.0 0.75 0.25 ```
|
|
||||||
|
|
||||||
### History processing
|
### History processing
|
||||||
|
|
||||||
The CLI provides a series of convenient commands for reviewing previous actions,
|
The CLI provides a series of convenient commands for reviewing previous actions,
|
||||||
|
@ -4,13 +4,24 @@ title: Image-to-Image
|
|||||||
|
|
||||||
# :material-image-multiple: Image-to-Image
|
# :material-image-multiple: Image-to-Image
|
||||||
|
|
||||||
## `img2img`
|
Both the Web and command-line interfaces provide an "img2img" feature
|
||||||
|
that lets you seed your creations with an initial drawing or
|
||||||
|
photo. This is a really cool feature that tells stable diffusion to
|
||||||
|
build the prompt on top of the image you provide, preserving the
|
||||||
|
original's basic shape and layout.
|
||||||
|
|
||||||
This script also provides an `img2img` feature that lets you seed your creations
|
See the [WebUI Guide](WEB.md) for a walkthrough of the img2img feature
|
||||||
with an initial drawing or photo. This is a really cool feature that tells
|
in the InvokeAI web server. This document describes how to use img2img
|
||||||
stable diffusion to build the prompt on top of the image you provide, preserving
|
in the command-line tool.
|
||||||
the original's basic shape and layout. To use it, provide the `--init_img`
|
|
||||||
option as shown here:
|
## Basic Usage
|
||||||
|
|
||||||
|
Launch the command-line client by launching `invoke.sh`/`invoke.bat`
|
||||||
|
and choosing option (1). Alternatively, activate the InvokeAI
|
||||||
|
environment and issue the command `invokeai`.
|
||||||
|
|
||||||
|
Once the `invoke> ` prompt appears, you can start an img2img render by
|
||||||
|
pointing to a seed file with the `-I` option as shown here:
|
||||||
|
|
||||||
!!! example ""
|
!!! example ""
|
||||||
|
|
||||||
|
@ -168,11 +168,15 @@ used by Stable Diffusion 1.4 and 1.5.
|
|||||||
After installation, your `models.yaml` should contain an entry that looks like
|
After installation, your `models.yaml` should contain an entry that looks like
|
||||||
this one:
|
this one:
|
||||||
|
|
||||||
inpainting-1.5: weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
|
```yml
|
||||||
description: SD inpainting v1.5 config:
|
inpainting-1.5:
|
||||||
configs/stable-diffusion/v1-inpainting-inference.yaml vae:
|
weights: models/ldm/stable-diffusion-v1/sd-v1-5-inpainting.ckpt
|
||||||
models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt width: 512
|
description: SD inpainting v1.5
|
||||||
|
config: configs/stable-diffusion/v1-inpainting-inference.yaml
|
||||||
|
vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
|
||||||
|
width: 512
|
||||||
height: 512
|
height: 512
|
||||||
|
```
|
||||||
|
|
||||||
As shown in the example, you may include a VAE fine-tuning weights file as well.
|
As shown in the example, you may include a VAE fine-tuning weights file as well.
|
||||||
This is strongly recommended.
|
This is strongly recommended.
|
||||||
|
@ -40,7 +40,7 @@ for adj in adjectives:
|
|||||||
print(f'a {adj} day -A{samp} -C{cg}')
|
print(f'a {adj} day -A{samp} -C{cg}')
|
||||||
```
|
```
|
||||||
|
|
||||||
It's output looks like this (abbreviated):
|
Its output looks like this (abbreviated):
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
a sunny day -Aklms -C7.5
|
a sunny day -Aklms -C7.5
|
||||||
|
@ -17,7 +17,7 @@ notebooks.
|
|||||||
|
|
||||||
You will need a GPU to perform training in a reasonable length of
|
You will need a GPU to perform training in a reasonable length of
|
||||||
time, and at least 12 GB of VRAM. We recommend using the [`xformers`
|
time, and at least 12 GB of VRAM. We recommend using the [`xformers`
|
||||||
library](../installation/070_INSTALL_XFORMERS) to accelerate the
|
library](../installation/070_INSTALL_XFORMERS.md) to accelerate the
|
||||||
training process further. During training, about ~8 GB is temporarily
|
training process further. During training, about ~8 GB is temporarily
|
||||||
needed in order to store intermediate models, checkpoints and logs.
|
needed in order to store intermediate models, checkpoints and logs.
|
||||||
|
|
||||||
@ -54,8 +54,7 @@ Please enter 1, 2, 3, or 4: [1] 3
|
|||||||
```
|
```
|
||||||
|
|
||||||
From the command line, with the InvokeAI virtual environment active,
|
From the command line, with the InvokeAI virtual environment active,
|
||||||
you can launch the front end with the command `textual_inversion
|
you can launch the front end with the command `invokeai-ti --gui`.
|
||||||
--gui`.
|
|
||||||
|
|
||||||
This will launch a text-based front end that will look like this:
|
This will launch a text-based front end that will look like this:
|
||||||
|
|
||||||
@ -227,12 +226,12 @@ It accepts a large number of arguments, which can be summarized by
|
|||||||
passing the `--help` argument:
|
passing the `--help` argument:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
textual_inversion --help
|
invokeai-ti --help
|
||||||
```
|
```
|
||||||
|
|
||||||
Typical usage is shown here:
|
Typical usage is shown here:
|
||||||
```sh
|
```sh
|
||||||
textual_inversion \
|
invokeai-ti \
|
||||||
--model=stable-diffusion-1.5 \
|
--model=stable-diffusion-1.5 \
|
||||||
--resolution=512 \
|
--resolution=512 \
|
||||||
--learnable_property=style \
|
--learnable_property=style \
|
||||||
@ -251,6 +250,24 @@ textual_inversion \
|
|||||||
--only_save_embeds
|
--only_save_embeds
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Using Embeddings
|
||||||
|
|
||||||
|
After training completes, the resultant embeddings will be saved into your `$INVOKEAI_ROOT/embeddings/<trigger word>/learned_embeds.bin`.
|
||||||
|
|
||||||
|
These will be automatically loaded when you start InvokeAI.
|
||||||
|
|
||||||
|
Add the trigger word, surrounded by angle brackets, to use that embedding. For example, if your trigger word was `terence`, use `<terence>` in prompts. This is the same syntax used by the HuggingFace concepts library.
|
||||||
|
|
||||||
|
**Note:** `.pt` embeddings do not require the angle brackets.
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### `Cannot load embedding for <trigger>. It was trained on a model with token dimension 1024, but the current model has token dimension 768`
|
||||||
|
|
||||||
|
Messages like this indicate you trained the embedding on a different base model than the currently selected one.
|
||||||
|
|
||||||
|
For example, in the error above, the training was done on SD2.1 (768x768) but it was used on SD1.5 (512x512).
|
||||||
|
|
||||||
## Reading
|
## Reading
|
||||||
|
|
||||||
For more information on textual inversion, please see the following
|
For more information on textual inversion, please see the following
|
||||||
|
@ -5,11 +5,14 @@ title: InvokeAI Web Server
|
|||||||
# :material-web: InvokeAI Web Server
|
# :material-web: InvokeAI Web Server
|
||||||
|
|
||||||
As of version 2.0.0, this distribution comes with a full-featured web server
|
As of version 2.0.0, this distribution comes with a full-featured web server
|
||||||
(see screenshot). To use it, run the `invoke.py` script by adding the `--web`
|
(see screenshot).
|
||||||
option:
|
|
||||||
|
To use it, launch the `invoke.sh`/`invoke.bat` script and select
|
||||||
|
option (2). Alternatively, with the InvokeAI environment active, run
|
||||||
|
the `invokeai` script by adding the `--web` option:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py --web
|
invokeai --web
|
||||||
```
|
```
|
||||||
|
|
||||||
You can then connect to the server by pointing your web browser at
|
You can then connect to the server by pointing your web browser at
|
||||||
@ -19,17 +22,23 @@ address of the host you are running it on, or the wildcard `0.0.0.0`. For
|
|||||||
example:
|
example:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py --web --host 0.0.0.0
|
invoke.sh --host 0.0.0.0
|
||||||
```
|
```
|
||||||
|
|
||||||
## Quick guided walkthrough of the WebGUI's features
|
or
|
||||||
|
|
||||||
While most of the WebGUI's features are intuitive, here is a guided walkthrough
|
```bash
|
||||||
|
invokeai --web --host 0.0.0.0
|
||||||
|
```
|
||||||
|
|
||||||
|
## Quick guided walkthrough of the WebUI's features
|
||||||
|
|
||||||
|
While most of the WebUI's features are intuitive, here is a guided walkthrough
|
||||||
through its various components.
|
through its various components.
|
||||||
|
|
||||||
{:width="640px"}
|
{:width="640px"}
|
||||||
|
|
||||||
The screenshot above shows the Text to Image tab of the WebGUI. There are three
|
The screenshot above shows the Text to Image tab of the WebUI. There are three
|
||||||
main sections:
|
main sections:
|
||||||
|
|
||||||
1. A **control panel** on the left, which contains various settings for text to
|
1. A **control panel** on the left, which contains various settings for text to
|
||||||
@ -63,12 +72,14 @@ From top to bottom, these are:
|
|||||||
1. Text to Image - generate images from text
|
1. Text to Image - generate images from text
|
||||||
2. Image to Image - from an uploaded starting image (drawing or photograph)
|
2. Image to Image - from an uploaded starting image (drawing or photograph)
|
||||||
generate a new one, modified by the text prompt
|
generate a new one, modified by the text prompt
|
||||||
3. Inpainting (pending) - Interactively erase portions of a starting image and
|
3. Unified Canvas - Interactively combine multiple images, extend them
|
||||||
have the AI fill in the erased region from a text prompt.
|
with outpainting, and modify interior portions of the image with
|
||||||
4. Outpainting (pending) - Interactively add blank space to the borders of a
|
inpainting, erase portions of a starting image and have the AI fill in
|
||||||
starting image and fill in the background from a text prompt.
|
the erased region from a text prompt.
|
||||||
5. Postprocessing (pending) - Interactively postprocess generated images using a
|
4. Workflow Management (not yet implemented) - this panel will allow you to create
|
||||||
variety of filters.
|
pipelines of common operations and combine them into workflows.
|
||||||
|
5. Training (not yet implemented) - this panel will provide an interface to [textual
|
||||||
|
inversion training](TEXTUAL_INVERSION.md) and fine tuning.
|
||||||
|
|
||||||
The inpainting, outpainting and postprocessing tabs are currently in
|
The inpainting, outpainting and postprocessing tabs are currently in
|
||||||
development. However, limited versions of their features can already be accessed
|
development. However, limited versions of their features can already be accessed
|
||||||
@ -76,18 +87,18 @@ through the Text to Image and Image to Image tabs.
|
|||||||
|
|
||||||
## Walkthrough
|
## Walkthrough
|
||||||
|
|
||||||
The following walkthrough will exercise most (but not all) of the WebGUI's
|
The following walkthrough will exercise most (but not all) of the WebUI's
|
||||||
feature set.
|
feature set.
|
||||||
|
|
||||||
### Text to Image
|
### Text to Image
|
||||||
|
|
||||||
1. Launch the WebGUI using `python scripts/invoke.py --web` and connect to it
|
1. Launch the WebUI using `python scripts/invoke.py --web` and connect to it
|
||||||
with your browser by accessing `http://localhost:9090`. If the browser and
|
with your browser by accessing `http://localhost:9090`. If the browser and
|
||||||
server are running on different machines on your LAN, add the option
|
server are running on different machines on your LAN, add the option
|
||||||
`--host 0.0.0.0` to the launch command line and connect to the machine
|
`--host 0.0.0.0` to the launch command line and connect to the machine
|
||||||
hosting the web server using its IP address or domain name.
|
hosting the web server using its IP address or domain name.
|
||||||
|
|
||||||
2. If all goes well, the WebGUI should come up and you'll see a green
|
2. If all goes well, the WebUI should come up and you'll see a green
|
||||||
`connected` message on the upper right.
|
`connected` message on the upper right.
|
||||||
|
|
||||||
#### Basics
|
#### Basics
|
||||||
@ -234,7 +245,7 @@ walkthrough.
|
|||||||
|
|
||||||
2. Drag-and-drop the Lincoln-and-Parrot image into the Image panel, or click
|
2. Drag-and-drop the Lincoln-and-Parrot image into the Image panel, or click
|
||||||
the blank area to get an upload dialog. The image will load into an area
|
the blank area to get an upload dialog. The image will load into an area
|
||||||
marked _Initial Image_. (The WebGUI will also load the most
|
marked _Initial Image_. (The WebUI will also load the most
|
||||||
recently-generated image from the gallery into a section on the left, but
|
recently-generated image from the gallery into a section on the left, but
|
||||||
this image will be replaced in the next step.)
|
this image will be replaced in the next step.)
|
||||||
|
|
||||||
@ -284,13 +295,17 @@ initial image" icons are located.
|
|||||||
|
|
||||||
{:width="640px"}
|
{:width="640px"}
|
||||||
|
|
||||||
|
### Unified Canvas
|
||||||
|
|
||||||
|
See the [Unified Canvas Guide](UNIFIED_CANVAS.md)
|
||||||
|
|
||||||
## Parting remarks
|
## Parting remarks
|
||||||
|
|
||||||
This concludes the walkthrough, but there are several more features that you can
|
This concludes the walkthrough, but there are several more features that you can
|
||||||
explore. Please check out the [Command Line Interface](CLI.md) documentation for
|
explore. Please check out the [Command Line Interface](CLI.md) documentation for
|
||||||
further explanation of the advanced features that were not covered here.
|
further explanation of the advanced features that were not covered here.
|
||||||
|
|
||||||
The WebGUI is under rapid development. Check back regularly for updates!
|
The WebUI is under rapid development. Check back regularly for updates!
|
||||||
|
|
||||||
## Reference
|
## Reference
|
||||||
|
|
||||||
|
@ -2,4 +2,62 @@
|
|||||||
title: Overview
|
title: Overview
|
||||||
---
|
---
|
||||||
|
|
||||||
Here you can find the documentation for different features.
|
Here you can find the documentation for InvokeAI's various features.
|
||||||
|
|
||||||
|
## The Basics
|
||||||
|
### * The [Web User Interface](WEB.md)
|
||||||
|
Guide to the Web interface. Also see the [WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)
|
||||||
|
|
||||||
|
### * The [Unified Canvas](UNIFIED_CANVAS.md)
|
||||||
|
Build complex scenes by combining and modifying multiple images in a stepwise
|
||||||
|
fashion. This feature combines img2img, inpainting and outpainting in
|
||||||
|
a single convenient digital artist-optimized user interface.
|
||||||
|
|
||||||
|
### * The [Command Line Interface (CLI)](CLI.md)
|
||||||
|
Scriptable access to InvokeAI's features.
|
||||||
|
|
||||||
|
## Image Generation
|
||||||
|
### * [Prompt Engineering](PROMPTS.md)
|
||||||
|
Get the images you want with the InvokeAI prompt engineering language.
|
||||||
|
|
||||||
|
### * [Post-Processing](POSTPROCESS.md)
|
||||||
|
Restore mangled faces and make images larger with upscaling. Also see the [Embiggen Upscaling Guide](EMBIGGEN.md).
|
||||||
|
|
||||||
|
### * The [Concepts Library](CONCEPTS.md)
|
||||||
|
Add custom subjects and styles using HuggingFace's repository of embeddings.
|
||||||
|
|
||||||
|
### * [Image-to-Image Guide for the CLI](IMG2IMG.md)
|
||||||
|
Use a seed image to build new creations in the CLI.
|
||||||
|
|
||||||
|
### * [Inpainting Guide for the CLI](INPAINTING.md)
|
||||||
|
Selectively erase and replace portions of an existing image in the CLI.
|
||||||
|
|
||||||
|
### * [Outpainting Guide for the CLI](OUTPAINTING.md)
|
||||||
|
Extend the borders of the image with an "outcrop" function within the CLI.
|
||||||
|
|
||||||
|
### * [Generating Variations](VARIATIONS.md)
|
||||||
|
Have an image you like and want to generate many more like it? Variations
|
||||||
|
are the ticket.
|
||||||
|
|
||||||
|
## Model Management
|
||||||
|
|
||||||
|
### * [Model Installation](../installation/050_INSTALLING_MODELS.md)
|
||||||
|
Learn how to import third-party models and switch among them. This
|
||||||
|
guide also covers optimizing models to load quickly.
|
||||||
|
|
||||||
|
### * [Merging Models](MODEL_MERGING.md)
|
||||||
|
Teach an old model new tricks. Merge 2-3 models together to create a
|
||||||
|
new model that combines characteristics of the originals.
|
||||||
|
|
||||||
|
### * [Textual Inversion](TEXTUAL_INVERSION.md)
|
||||||
|
Personalize models by adding your own style or subjects.
|
||||||
|
|
||||||
|
## Other Features
|
||||||
|
|
||||||
|
### * [The NSFW Checker](NSFW.md)
|
||||||
|
Prevent InvokeAI from displaying unwanted racy images.
|
||||||
|
|
||||||
|
### * [Miscellaneous](OTHER.md)
|
||||||
|
Run InvokeAI on Google Colab, generate images with repeating patterns,
|
||||||
|
batch process a file of prompts, increase the "creativity" of image
|
||||||
|
generation by adding initial noise, and more!
|
||||||
|
@ -1,19 +0,0 @@
|
|||||||
<!-- HTML for static distribution bundle build -->
|
|
||||||
<!DOCTYPE html>
|
|
||||||
<html lang="en">
|
|
||||||
<head>
|
|
||||||
<meta charset="UTF-8">
|
|
||||||
<title>Swagger UI</title>
|
|
||||||
<link rel="stylesheet" type="text/css" href="swagger-ui/swagger-ui.css" />
|
|
||||||
<link rel="stylesheet" type="text/css" href="swagger-ui/index.css" />
|
|
||||||
<link rel="icon" type="image/png" href="swagger-ui/favicon-32x32.png" sizes="32x32" />
|
|
||||||
<link rel="icon" type="image/png" href="swagger-ui/favicon-16x16.png" sizes="16x16" />
|
|
||||||
</head>
|
|
||||||
|
|
||||||
<body>
|
|
||||||
<div id="swagger-ui"></div>
|
|
||||||
<script src="swagger-ui/swagger-ui-bundle.js" charset="UTF-8"> </script>
|
|
||||||
<script src="swagger-ui/swagger-ui-standalone-preset.js" charset="UTF-8"> </script>
|
|
||||||
<script src="swagger-ui/swagger-initializer.js" charset="UTF-8"> </script>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
237
docs/index.md
@ -81,28 +81,6 @@ Q&A</a>]
|
|||||||
|
|
||||||
This fork is rapidly evolving. Please use the [Issues tab](https://github.com/invoke-ai/InvokeAI/issues) to report bugs and make feature requests. Be sure to use the provided templates. They will help diagnose issues faster.
|
This fork is rapidly evolving. Please use the [Issues tab](https://github.com/invoke-ai/InvokeAI/issues) to report bugs and make feature requests. Be sure to use the provided templates. They will help diagnose issues faster.
|
||||||
|
|
||||||
## :octicons-package-dependencies-24: Installation
|
|
||||||
|
|
||||||
This fork is supported across Linux, Windows and Macintosh. Linux users can use
|
|
||||||
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
|
|
||||||
driver).
|
|
||||||
|
|
||||||
First time users, please see
|
|
||||||
[Automated Installer](installation/INSTALL_AUTOMATED.md) for a walkthrough of
|
|
||||||
getting InvokeAI up and running on your system. For alternative installation and
|
|
||||||
upgrade instructions, please see:
|
|
||||||
[InvokeAI Installation Overview](installation/)
|
|
||||||
|
|
||||||
Users who wish to make use of the **PyPatchMatch** inpainting functions
|
|
||||||
will need to perform a bit of extra work to enable this
|
|
||||||
module. Instructions can be found at [Installing
|
|
||||||
PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md).
|
|
||||||
|
|
||||||
If you have an NVIDIA card, you can benefit from the significant
|
|
||||||
memory savings and performance benefits provided by Facebook Lab's
|
|
||||||
**xFormers** module. Instructions for Linux and Windows users can be found
|
|
||||||
at [Installing xFormers](installation/070_INSTALL_XFORMERS.md).
|
|
||||||
|
|
||||||
## :fontawesome-solid-computer: Hardware Requirements
|
## :fontawesome-solid-computer: Hardware Requirements
|
||||||
|
|
||||||
### :octicons-cpu-24: System
|
### :octicons-cpu-24: System
|
||||||
@ -122,141 +100,146 @@ images in full-precision mode:
|
|||||||
- GTX 1650 series cards
|
- GTX 1650 series cards
|
||||||
- GTX 1660 series cards
|
- GTX 1660 series cards
|
||||||
|
|
||||||
### :fontawesome-solid-memory: Memory
|
### :fontawesome-solid-memory: Memory and Disk
|
||||||
|
|
||||||
- At least 12 GB Main Memory RAM.
|
- At least 12 GB Main Memory RAM.
|
||||||
|
|
||||||
### :fontawesome-regular-hard-drive: Disk
|
|
||||||
|
|
||||||
- At least 18 GB of free disk space for the machine learning model, Python, and
|
- At least 18 GB of free disk space for the machine learning model, Python, and
|
||||||
all its dependencies.
|
all its dependencies.
|
||||||
|
|
||||||
!!! info
|
## :octicons-package-dependencies-24: Installation
|
||||||
|
|
||||||
Precision is auto configured based on the device. If however you encounter errors like
|
This fork is supported across Linux, Windows and Macintosh. Linux users can use
|
||||||
`expected type Float but found Half` or `not implemented for Half` you can try starting
|
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
|
||||||
`invoke.py` with the `--precision=float32` flag:
|
driver).
|
||||||
|
|
||||||
```bash
|
### [Installation Getting Started Guide](installation)
|
||||||
(invokeai) ~/InvokeAI$ python scripts/invoke.py --full_precision
|
#### [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
|
||||||
```
|
This method is recommended for 1st time users
|
||||||
|
#### [Manual Installation](installation/020_INSTALL_MANUAL.md)
|
||||||
|
This method is recommended for experienced users and developers
|
||||||
|
#### [Docker Installation](installation/040_INSTALL_DOCKER.md)
|
||||||
|
This method is recommended for those familiar with running Docker containers
|
||||||
|
### Other Installation Guides
|
||||||
|
- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
|
||||||
|
- [XFormers](installation/070_INSTALL_XFORMERS.md)
|
||||||
|
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
|
||||||
|
- [Installing New Models](installation/050_INSTALLING_MODELS.md)
|
||||||
|
|
||||||
## :octicons-gift-24: InvokeAI Features
|
## :octicons-gift-24: InvokeAI Features
|
||||||
|
|
||||||
- [The InvokeAI Web Interface](features/WEB.md) -
|
### The InvokeAI Web Interface
|
||||||
[WebGUI hotkey reference guide](features/WEBUIHOTKEYS.md) -
|
- [WebUI overview](features/WEB.md)
|
||||||
[WebGUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
|
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
|
||||||
<!-- separator -->
|
- [WebUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
|
||||||
- [The Command Line Interface](features/CLI.md) -
|
<!-- separator -->
|
||||||
[Image2Image](features/IMG2IMG.md) - [Inpainting](features/INPAINTING.md) -
|
### The InvokeAI Command Line Interface
|
||||||
[Outpainting](features/OUTPAINTING.md) -
|
- [Command Line Interface Reference Guide](features/CLI.md)
|
||||||
[Adding custom styles and subjects](features/CONCEPTS.md) -
|
<!-- separator -->
|
||||||
[Upscaling and Face Reconstruction](features/POSTPROCESS.md)
|
### Image Management
|
||||||
<!-- separator -->
|
- [Image2Image](features/IMG2IMG.md)
|
||||||
- [Generating Variations](features/VARIATIONS.md)
|
- [Inpainting](features/INPAINTING.md)
|
||||||
<!-- separator -->
|
- [Outpainting](features/OUTPAINTING.md)
|
||||||
- [Prompt Engineering](features/PROMPTS.md)
|
- [Adding custom styles and subjects](features/CONCEPTS.md)
|
||||||
<!-- separator -->
|
- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
|
||||||
- [Model Merging](features/MODEL_MERGING.md)
|
|
||||||
<!-- separator -->
|
|
||||||
- Miscellaneous
|
|
||||||
- [NSFW Checker](features/NSFW.md)
|
|
||||||
- [Embiggen upscaling](features/EMBIGGEN.md)
|
- [Embiggen upscaling](features/EMBIGGEN.md)
|
||||||
- [Other](features/OTHER.md)
|
- [Other Features](features/OTHER.md)
|
||||||
|
|
||||||
|
<!-- separator -->
|
||||||
|
### Model Management
|
||||||
|
- [Installing](installation/050_INSTALLING_MODELS.md)
|
||||||
|
- [Model Merging](features/MODEL_MERGING.md)
|
||||||
|
- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
|
||||||
|
- [Textual Inversion](features/TEXTUAL_INVERSION.md)
|
||||||
|
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)
|
||||||
|
<!-- separator -->
|
||||||
|
### Prompt Engineering
|
||||||
|
- [Prompt Syntax](features/PROMPTS.md)
|
||||||
|
- [Generating Variations](features/VARIATIONS.md)
|
||||||
|
|
||||||
## :octicons-log-16: Latest Changes
|
## :octicons-log-16: Latest Changes
|
||||||
|
|
||||||
### v2.2.4 <small>(11 December 2022)</small>
|
### v2.3.0 <small>(9 February 2023)</small>
|
||||||
|
|
||||||
#### the `invokeai` directory
|
#### Migration to Stable Diffusion `diffusers` models
|
||||||
|
|
||||||
Previously there were two directories to worry about, the directory that
|
Previous versions of InvokeAI supported the original model file format introduced with Stable Diffusion 1.4. In the original format, known variously as "checkpoint", or "legacy" format, there is a single large weights file ending with `.ckpt` or `.safetensors`. Though this format has served the community well, it has a number of disadvantages, including file size, slow loading times, and a variety of non-standard variants that require special-case code to handle. In addition, because checkpoint files are actually a bundle of multiple machine learning sub-models, it is hard to swap different sub-models in and out, or to share common sub-models. A new format, introduced by the StabilityAI company in collaboration with HuggingFace, is called `diffusers` and consists of a directory of individual models. The most immediate benefit of `diffusers` is that they load from disk very quickly. A longer term benefit is that in the near future `diffusers` models will be able to share common sub-models, dramatically reducing disk space when you have multiple fine-tune models derived from the same base.
|
||||||
contained the InvokeAI source code and the launcher scripts, and the `invokeai`
|
|
||||||
directory that contained the models files, embeddings, configuration and
|
|
||||||
outputs. With the 2.2.4 release, this dual system is done away with, and
|
|
||||||
everything, including the `invoke.bat` and `invoke.sh` launcher scripts, now
|
|
||||||
live in a directory named `invokeai`. By default this directory is located in
|
|
||||||
your home directory (e.g. `\Users\yourname` on Windows), but you can select
|
|
||||||
where it goes at install time.
|
|
||||||
|
|
||||||
After installation, you can delete the install directory (the one that the zip
|
When you perform a new install of version 2.3.0, you will be offered the option to install the `diffusers` versions of a number of popular SD models, including Stable Diffusion versions 1.5 and 2.1 (including the 768x768 pixel version of 2.1). These will act and work just like the checkpoint versions. Do not be concerned if you already have a lot of ".ckpt" or ".safetensors" models on disk! InvokeAI 2.3.0 can still load these and generate images from them without any extra intervention on your part.
|
||||||
file creates when it unpacks). Do **not** delete or move the `invokeai`
|
|
||||||
directory!
|
|
||||||
|
|
||||||
##### Initialization file `invokeai/invokeai.init`
|
To take advantage of the optimized loading times of `diffusers` models, InvokeAI offers options to convert legacy checkpoint models into optimized `diffusers` models. If you use the `invokeai` command line interface, the relevant commands are:
|
||||||
|
|
||||||
You can place frequently-used startup options in this file, such as the default
|
* `!convert_model` -- Take the path to a local checkpoint file or a URL that is pointing to one, convert it into a `diffusers` model, and import it into InvokeAI's models registry file.
|
||||||
number of steps or your preferred sampler. To keep everything in one place, this
|
* `!optimize_model` -- If you already have a checkpoint model in your InvokeAI models file, this command will accept its short name and convert it into a like-named `diffusers` model, optionally deleting the original checkpoint file.
|
||||||
file has now been moved into the `invokeai` directory and is named
|
* `!import_model` -- Take the local path of either a checkpoint file or a `diffusers` model directory and import it into InvokeAI's registry file. You may also provide the ID of any diffusers model that has been published on the [HuggingFace models repository](https://huggingface.co/models?pipeline_tag=text-to-image&sort=downloads) and it will be downloaded and installed automatically.
|
||||||
`invokeai.init`.
|
|
||||||
|
|
||||||
#### To update from Version 2.2.3
|
The WebGUI offers similar functionality for model management.
|
||||||
|
|
||||||
The easiest route is to download and unpack one of the 2.2.4 installer files.
|
For advanced users, new command-line options provide additional functionality. Launching `invokeai` with the argument `--autoconvert <path to directory>` takes the path to a directory of checkpoint files, automatically converts them into `diffusers` models and imports them. Each time the script is launched, the directory will be scanned for new checkpoint files to be loaded. Alternatively, the `--ckpt_convert` argument will cause any checkpoint or safetensors model that is already registered with InvokeAI to be converted into a `diffusers` model on the fly, allowing you to take advantage of future diffusers-only features without explicitly converting the model and saving it to disk.
|
||||||
When it asks you for the location of the `invokeai` runtime directory, respond
|
|
||||||
with the path to the directory that contains your 2.2.3 `invokeai`. That is, if
|
|
||||||
`invokeai` lives at `C:\Users\fred\invokeai`, then answer with `C:\Users\fred`
|
|
||||||
and answer "Y" when asked if you want to reuse the directory.
|
|
||||||
|
|
||||||
The `update.sh` (`update.bat`) script that came with the 2.2.3 source installer
|
Please see [INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/) for more information on model management in both the command-line and Web interfaces.
|
||||||
does not know about the new directory layout and won't be fully functional.
|
|
||||||
|
|
||||||
#### To update to 2.2.5 (and beyond) there's now an update path.
|
#### Support for the `XFormers` Memory-Efficient Crossattention Package
|
||||||
|
|
||||||
As they become available, you can update to more recent versions of InvokeAI
|
On CUDA (Nvidia) systems, version 2.3.0 supports the `XFormers` library. Once installed, the`xformers` package dramatically reduces the memory footprint of loaded Stable Diffusion models files and modestly increases image generation speed. `xformers` will be installed and activated automatically if you specify a CUDA system at install time.
|
||||||
using an `update.sh` (`update.bat`) script located in the `invokeai` directory.
|
|
||||||
Running it without any arguments will install the most recent version of
|
|
||||||
InvokeAI. Alternatively, you can get set releases by running the `update.sh`
|
|
||||||
script with an argument in the command shell. This syntax accepts the path to
|
|
||||||
the desired release's zip file, which you can find by clicking on the green
|
|
||||||
"Code" button on this repository's home page.
|
|
||||||
|
|
||||||
#### Other 2.2.4 Improvements
|
The caveat with using `xformers` is that it introduces slightly non-deterministic behavior, and images generated using the same seed and other settings will be subtly different between invocations. Generally the changes are unnoticeable unless you rapidly shift back and forth between images, but to disable `xformers` and restore fully deterministic behavior, you may launch InvokeAI using the `--no-xformers` option. This is most conveniently done by opening the file `invokeai/invokeai.init` with a text editor, and adding the line `--no-xformers` at the bottom.
|
||||||
|
|
||||||
- Fix InvokeAI GUI initialization by @addianto in #1687
|
#### A Negative Prompt Box in the WebUI
|
||||||
- fix link in documentation by @lstein in #1728
|
|
||||||
- Fix broken link by @ShawnZhong in #1736
|
|
||||||
- Remove reference to binary installer by @lstein in #1731
|
|
||||||
- documentation fixes for 2.2.3 by @lstein in #1740
|
|
||||||
- Modify installer links to point closer to the source installer by @ebr in
|
|
||||||
#1745
|
|
||||||
- add documentation warning about 1650/60 cards by @lstein in #1753
|
|
||||||
- Fix Linux source URL in installation docs by @andybearman in #1756
|
|
||||||
- Make install instructions discoverable in readme by @damian0815 in #1752
|
|
||||||
- typo fix by @ofirkris in #1755
|
|
||||||
- Non-interactive model download (support HUGGINGFACE_TOKEN) by @ebr in #1578
|
|
||||||
- fix(srcinstall): shell installer - cp scripts instead of linking by @tildebyte
|
|
||||||
in #1765
|
|
||||||
- stability and usage improvements to binary & source installers by @lstein in
|
|
||||||
#1760
|
|
||||||
- fix off-by-one bug in cross-attention-control by @damian0815 in #1774
|
|
||||||
- Eventually update APP_VERSION to 2.2.3 by @spezialspezial in #1768
|
|
||||||
- invoke script cds to its location before running by @lstein in #1805
|
|
||||||
- Make PaperCut and VoxelArt models load again by @lstein in #1730
|
|
||||||
- Fix --embedding_directory / --embedding_path not working by @blessedcoolant in
|
|
||||||
#1817
|
|
||||||
- Clean up readme by @hipsterusername in #1820
|
|
||||||
- Optimized Docker build with support for external working directory by @ebr in
|
|
||||||
#1544
|
|
||||||
- disable pushing the cloud container by @mauwii in #1831
|
|
||||||
- Fix docker push github action and expand with additional metadata by @ebr in
|
|
||||||
#1837
|
|
||||||
- Fix Broken Link To Notebook by @VedantMadane in #1821
|
|
||||||
- Account for flat models by @spezialspezial in #1766
|
|
||||||
- Update invoke.bat.in isolate environment variables by @lynnewu in #1833
|
|
||||||
- Arch Linux Specific PatchMatch Instructions & fixing conda install on linux by
|
|
||||||
@SammCheese in #1848
|
|
||||||
- Make force free GPU memory work in img2img by @addianto in #1844
|
|
||||||
- New installer by @lstein
|
|
||||||
|
|
||||||
|
There is now a separate text input box for negative prompts in the WebUI. This is convenient for stashing frequently-used negative prompts ("mangled limbs, bad anatomy"). The `[negative prompt]` syntax continues to work in the main prompt box as well.
|
||||||
|
|
||||||
|
To see exactly how your prompts are being parsed, launch `invokeai` with the `--log_tokenization` option. The console window will then display the tokenization process for both positive and negative prompts.
|
||||||
|
|
||||||
|
#### Model Merging
|
||||||
|
|
||||||
|
Version 2.3.0 offers an intuitive user interface for merging up to three Stable Diffusion models. Model merging allows you to mix the behavior of models to achieve very interesting effects. To use this, each of the models must already be imported into InvokeAI and saved in `diffusers` format, then launch the merger using a new menu item in the InvokeAI launcher script (`invoke.sh`, `invoke.bat`) or directly from the command line with `invokeai-merge --gui`. You will be prompted to select the models to merge, the proportions in which to mix them, and the mixing algorithm. The script will create a new merged `diffusers` model and import it into InvokeAI for your use.
|
||||||
|
|
||||||
|
See [MODEL MERGING](https://invoke-ai.github.io/InvokeAI/features/MODEL_MERGING/) for more details.
|
||||||
|
|
||||||
|
#### Textual Inversion Training
|
||||||
|
|
||||||
|
Textual Inversion (TI) is a technique for training a Stable Diffusion model to emit a particular subject or style when triggered by a keyword phrase. You can perform TI training by placing a small number of images of the subject or style in a directory, and choosing a distinctive trigger phrase, such as "pointillist-style". After successful training, The subject or style will be activated by including `<pointillist-style>` in your prompt.
|
||||||
|
|
||||||
|
Previous versions of InvokeAI were able to perform TI, but it required using a command-line script with dozens of obscure command-line arguments. Version 2.3.0 features an intuitive TI frontend that will build a TI model on top of any `diffusers` model. To access training you can launch from a new item in the launcher script or from the command line using `invokeai-ti --gui`.
|
||||||
|
|
||||||
|
See [TEXTUAL INVERSION](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/) for further details.
|
||||||
|
|
||||||
|
#### A New Installer Experience
|
||||||
|
|
||||||
|
The InvokeAI installer has been upgraded in order to provide a smoother and hopefully more glitch-free experience. In addition, InvokeAI is now packaged as a PyPi project, allowing developers and power-users to install InvokeAI with the command `pip install InvokeAI --use-pep517`. Please see [Installation](#installation) for details.
|
||||||
|
|
||||||
|
Developers should be aware that the `pip` installation procedure has been simplified and that the `conda` method is no longer supported at all. Accordingly, the `environments_and_requirements` directory has been deleted from the repository.
|
||||||
|
|
||||||
|
#### Command-line name changes
|
||||||
|
|
||||||
|
All of InvokeAI's functionality, including the WebUI, command-line interface, textual inversion training and model merging, can be accessed from the `invoke.sh` and `invoke.bat` launcher scripts. The menu of options has been expanded to add the new functionality. For the convenience of developers and power users, we have normalized the names of the InvokeAI command-line scripts:
|
||||||
|
|
||||||
|
* `invokeai` -- Command-line client
|
||||||
|
* `invokeai --web` -- Web GUI
|
||||||
|
* `invokeai-merge --gui` -- Model merging script with graphical front end
|
||||||
|
* `invokeai-ti --gui` -- Textual inversion script with graphical front end
|
||||||
|
* `invokeai-configure` -- Configuration tool for initializing the `invokeai` directory and selecting popular starter models.
|
||||||
|
|
||||||
|
For backward compatibility, the old command names are also recognized, including `invoke.py` and `configure-invokeai.py`. However, these are deprecated and will eventually be removed.
|
||||||
|
|
||||||
|
Developers should be aware that the locations of the script's source code has been moved. The new locations are:
|
||||||
|
* `invokeai` => `ldm/invoke/CLI.py`
|
||||||
|
* `invokeai-configure` => `ldm/invoke/config/configure_invokeai.py`
|
||||||
|
* `invokeai-ti`=> `ldm/invoke/training/textual_inversion.py`
|
||||||
|
* `invokeai-merge` => `ldm/invoke/merge_diffusers`
|
||||||
|
|
||||||
|
Developers are strongly encouraged to perform an "editable" install of InvokeAI using `pip install -e . --use-pep517` in the Git repository, and then to call the scripts using their 2.3.0 names, rather than executing the scripts directly. Developers should also be aware that several important data files have been relocated into a new directory named `invokeai`. This includes the WebGUI's `frontend` and `backend` directories, and the `INITIAL_MODELS.yaml` files used by the installer to select starter models. Eventually all InvokeAI modules will be in subdirectories of `invokeai`.
|
||||||
|
|
||||||
|
Please see [2.3.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.3.0) for further details.
|
||||||
For older changelogs, please visit the
|
For older changelogs, please visit the
|
||||||
**[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.
|
**[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.
|
||||||
|
|
||||||
## :material-target: Troubleshooting
|
## :material-target: Troubleshooting
|
||||||
|
|
||||||
Please check out our
|
Please check out our **[:material-frequently-asked-questions:
|
||||||
**[:material-frequently-asked-questions: Q&A](help/TROUBLESHOOT.md)** to get
|
Troubleshooting
|
||||||
solutions for common installation problems and other issues.
|
Guide](installation/010_INSTALL_AUTOMATED.md#troubleshooting)** to
|
||||||
|
get solutions for common installation problems and other issues.
|
||||||
|
|
||||||
## :octicons-repo-push-24: Contributing
|
## :octicons-repo-push-24: Contributing
|
||||||
|
|
||||||
@ -282,8 +265,8 @@ thank them for their time, hard work and effort.
|
|||||||
For support, please use this repository's GitHub Issues tracking service. Feel
|
For support, please use this repository's GitHub Issues tracking service. Feel
|
||||||
free to send me an email if you use and like the script.
|
free to send me an email if you use and like the script.
|
||||||
|
|
||||||
Original portions of the software are Copyright (c) 2020
|
Original portions of the software are Copyright (c) 2022-23
|
||||||
[Lincoln D. Stein](https://github.com/lstein)
|
by [The InvokeAI Team](https://github.com/invoke-ai).
|
||||||
|
|
||||||
## :octicons-book-24: Further Reading
|
## :octicons-book-24: Further Reading
|
||||||
|
|
||||||
|
@ -6,81 +6,76 @@ title: Installing with the Automated Installer
|
|||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
|
||||||
The automated installer is a shell script that attempts to automate every step
|
The automated installer is a Python script that automates the steps
|
||||||
needed to install and run InvokeAI on a stock computer running recent versions
|
needed to install and run InvokeAI on a stock computer running recent
|
||||||
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
|
versions of Linux, MacOS or Windows. It will leave you with a version
|
||||||
version of InvokeAI with the option to upgrade to experimental versions later.
|
that runs a stable version of InvokeAI with the option to upgrade to
|
||||||
|
experimental versions later.
|
||||||
|
|
||||||
## Walk through
|
## Walk through
|
||||||
|
|
||||||
1. Make sure that your system meets the
|
1. <a name="hardware_requirements">**Hardware Requirements**: </a>Make sure that your system meets the [hardware
|
||||||
[hardware requirements](../index.md#hardware-requirements) and has the
|
requirements](../index.md#hardware-requirements) and has the
|
||||||
appropriate GPU drivers installed. In particular, if you are a Linux user
|
appropriate GPU drivers installed. For a system with an NVIDIA
|
||||||
with an AMD GPU installed, you may need to install the
|
card installed, you will need to install the CUDA driver, while
|
||||||
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
|
AMD-based cards require the ROCm driver. In most cases, if you've
|
||||||
|
already used the system for gaming or other graphics-intensive
|
||||||
|
tasks, the appropriate drivers will already be installed. If
|
||||||
|
unsure, check the [GPU Driver Guide](030_INSTALL_CUDA_AND_ROCM.md).
|
||||||
|
|
||||||
!!! info "Required Space"
|
!!! info "Required Space"
|
||||||
|
|
||||||
Installation requires roughly 18G of free disk space to load the libraries and
|
Installation requires roughly 18G of free disk space to load
|
||||||
recommended model weights files.
|
the libraries and recommended model weights files.
|
||||||
|
|
||||||
Regardless of your destination disk, your *system drive* (`C:\` on Windows, `/` on macOS/Linux) requires at least 6GB of free disk space to download and cache python dependencies. NOTE for Linux users: if your temporary directory is mounted as a `tmpfs`, ensure it has sufficient space.
|
Regardless of your destination disk, your *system drive*
|
||||||
|
(`C:\` on Windows, `/` on macOS/Linux) requires at least 6GB
|
||||||
|
of free disk space to download and cache python
|
||||||
|
dependencies.
|
||||||
|
|
||||||
2. Check that your system has an up-to-date Python installed. To do this, open
|
NOTE for Linux users: if your temporary directory is mounted
|
||||||
up a command-line window ("Terminal" on Linux and Macintosh, "Command" or
|
as a `tmpfs`, ensure it has sufficient space.
|
||||||
"Powershell" on Windows) and type `python --version`. If Python is
|
|
||||||
installed, it will print out the version number. If it is version `3.9.1` or `3.10.x`, you meet requirements.
|
|
||||||
|
|
||||||
!!! warning "At this time we do not recommend Python 3.11"
|
2. <a name="software_requirements">**Software Requirements**: </a>Check that your system has an up-to-date Python installed. To do
|
||||||
|
this, open up a command-line window ("Terminal" on Linux and
|
||||||
|
Macintosh, "Command" or "Powershell" on Windows) and type `python
|
||||||
|
--version`. If Python is installed, it will print out the version
|
||||||
|
number. If it is version `3.9.*` or `3.10.*`, you meet
|
||||||
|
requirements. We do not recommend using Python 3.11 or higher,
|
||||||
|
as not all the libraries that InvokeAI depends on work properly
|
||||||
|
with this version.
|
||||||
|
|
||||||
!!! warning "If you see an older version, or get a command not found error"
|
!!! warning "What to do if you have an unsupported version"
|
||||||
|
|
||||||
Go to [Python Downloads](https://www.python.org/downloads/) and
|
Go to [Python Downloads](https://www.python.org/downloads/)
|
||||||
download the appropriate installer package for your platform. We recommend
|
and download the appropriate installer package for your
|
||||||
[Version 3.10.9](https://www.python.org/downloads/release/python-3109/),
|
platform. We recommend [Version
|
||||||
|
3.10.9](https://www.python.org/downloads/release/python-3109/),
|
||||||
which has been extensively tested with InvokeAI.
|
which has been extensively tested with InvokeAI.
|
||||||
|
|
||||||
|
|
||||||
_Please select your platform in the section below for platform-specific
|
_Please select your platform in the section below for platform-specific
|
||||||
setup requirements._
|
setup requirements._
|
||||||
|
|
||||||
=== "Windows users"
|
=== "Windows"
|
||||||
|
During the Python configuration process, look out for a
|
||||||
|
checkbox to add Python to your PATH and select it. If the
|
||||||
|
install script complains that it can't find python, then open
|
||||||
|
the Python installer again and choose "Modify" existing
|
||||||
|
installation.
|
||||||
|
|
||||||
- During the Python configuration process,
|
Installation requires an up to date version of the Microsoft
|
||||||
look out for a checkbox to add Python to your PATH
|
Visual C libraries. Please install the 2015-2022 libraries
|
||||||
and select it. If the install script complains that it can't
|
available here:
|
||||||
find python, then open the Python installer again and choose
|
https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170
|
||||||
"Modify" existing installation.
|
|
||||||
|
|
||||||
- Installation requires an up to date version of the Microsoft Visual C libraries. Please install the 2015-2022 libraries available here: https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170
|
Please double-click on the file `WinLongPathsEnabled.reg` and
|
||||||
|
accept the dialog box that asks you if you wish to modify your registry.
|
||||||
|
This activates long filename support on your system and will prevent
|
||||||
|
mysterious errors during installation.
|
||||||
|
|
||||||
=== "Mac users"
|
=== "Linux"
|
||||||
|
To install an appropriate version of Python on Ubuntu 22.04
|
||||||
- After installing Python, you may need to run the
|
and higher, run the following:
|
||||||
following command from the Terminal in order to install the Web
|
|
||||||
certificates needed to download model data from https sites. If
|
|
||||||
you see lots of CERTIFICATE ERRORS during the last part of the
|
|
||||||
install, this is the problem, and you can fix it with this command:
|
|
||||||
|
|
||||||
`/Applications/Python\ 3.10/Install\ Certificates.command`
|
|
||||||
|
|
||||||
- You may need to install the Xcode command line tools. These
|
|
||||||
are a set of tools that are needed to run certain applications in a
|
|
||||||
Terminal, including InvokeAI. This package is provided directly by Apple.
|
|
||||||
|
|
||||||
- To install, open a terminal window and run `xcode-select
|
|
||||||
--install`. You will get a macOS system popup guiding you through the
|
|
||||||
install. If you already have them installed, you will instead see some
|
|
||||||
output in the Terminal advising you that the tools are already installed.
|
|
||||||
|
|
||||||
- More information can be found here:
|
|
||||||
https://www.freecodecamp.org/news/install-xcode-command-line-tools/
|
|
||||||
|
|
||||||
=== "Linux users"
|
|
||||||
|
|
||||||
For reasons that are not entirely clear, installing the correct version of Python can be a bit of a challenge on Ubuntu, Linux Mint, Pop!_OS, and other Debian-derived distributions.
|
|
||||||
|
|
||||||
On Ubuntu 22.04 and higher, run the following:
|
|
||||||
|
|
||||||
```
|
```
|
||||||
sudo apt update
|
sudo apt update
|
||||||
@ -98,75 +93,101 @@ version of InvokeAI with the option to upgrade to experimental versions later.
|
|||||||
sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
|
sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
|
||||||
```
|
```
|
||||||
|
|
||||||
Both `python` and `python3` commands are now pointing at Python3.10. You can still access older versions of Python by calling `python2`, `python3.8`, etc.
|
Both `python` and `python3` commands are now pointing at
|
||||||
|
Python3.10. You can still access older versions of Python by
|
||||||
|
calling `python2`, `python3.8`, etc.
|
||||||
|
|
||||||
Linux systems require a couple of additional graphics libraries to be installed for proper functioning of `python3-opencv`. Please run the following:
|
Linux systems require a couple of additional graphics
|
||||||
|
libraries to be installed for proper functioning of
|
||||||
|
`python3-opencv`. Please run the following:
|
||||||
|
|
||||||
`sudo apt update && sudo apt install -y libglib2.0-0 libgl1-mesa-glx`
|
`sudo apt update && sudo apt install -y libglib2.0-0 libgl1-mesa-glx`
|
||||||
|
|
||||||
3. The source installer is distributed in ZIP files. Go to the
|
=== "Mac"
|
||||||
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
|
|
||||||
look for a series of files named:
|
|
||||||
|
|
||||||
- InvokeAI-installer-2.X.X.zip
|
After installing Python, you may need to run the
|
||||||
|
following command from the Terminal in order to install the Web
|
||||||
|
certificates needed to download model data from https sites. If
|
||||||
|
you see lots of CERTIFICATE ERRORS during the last part of the
|
||||||
|
install, this is the problem, and you can fix it with this command:
|
||||||
|
|
||||||
(Where 2.X.X is the current release number).
|
`/Applications/Python\ 3.10/Install\ Certificates.command`
|
||||||
|
|
||||||
Download the latest release.
|
You may need to install the Xcode command line tools. These
|
||||||
|
are a set of tools that are needed to run certain applications in a
|
||||||
|
Terminal, including InvokeAI. This package is provided
|
||||||
|
directly by Apple. To install, open a terminal window and run `xcode-select --install`. You will get a macOS system popup guiding you through the
|
||||||
|
install. If you already have them installed, you will instead see some
|
||||||
|
output in the Terminal advising you that the tools are already installed. More information can be found at [FreeCode Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
|
||||||
|
|
||||||
4. Unpack the zip file into a convenient directory. This will create a new
|
3. **Download the Installer**: The InvokeAI installer is distributed as a ZIP file. Go to the
|
||||||
directory named "InvokeAI-Installer". This example shows how this would look
|
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest),
|
||||||
using the `unzip` command-line tool, but you may use any graphical or
|
and look for a file named:
|
||||||
command-line Zip extractor:
|
|
||||||
|
|
||||||
```cmd
|
- InvokeAI-installer-v2.X.X.zip
|
||||||
C:\Documents\Linco> unzip InvokeAI-installer-2.X.X-windows.zip
|
|
||||||
Archive: C:\Linco\Downloads\InvokeAI-installer-2.X.X-windows.zip
|
|
||||||
creating: InvokeAI-Installer\
|
|
||||||
inflating: InvokeAI-Installer\install.bat
|
|
||||||
inflating: InvokeAI-Installer\readme.txt
|
|
||||||
...
|
|
||||||
```
|
|
||||||
|
|
||||||
After successful installation, you can delete the `InvokeAI-Installer`
|
where "2.X.X" is the latest released version. The file is located
|
||||||
directory.
|
at the very bottom of the release page, under **Assets**.
|
||||||
|
|
||||||
5. **Windows only** Please double-click on the file WinLongPathsEnabled.reg and
|
4. **Unpack the installer**: Unpack the zip file into a convenient directory. This will create a new
|
||||||
accept the dialog box that asks you if you wish to modify your registry.
|
directory named "InvokeAI-Installer". When unpacked, the directory
|
||||||
This activates long filename support on your system and will prevent
|
will look like this:
|
||||||
mysterious errors during installation.
|
|
||||||
|
|
||||||
6. If you are using a desktop GUI, double-click the installer file. It will be
|
<figure markdown>
|
||||||
named `install.bat` on Windows systems and `install.sh` on Linux and
|

|
||||||
Macintosh systems.
|
</figure>
|
||||||
|
|
||||||
On Windows systems you will probably get an "Untrusted Publisher" warning.
|
5. **Launch the installer script from the desktop**: If you are using a desktop GUI, double-click the installer file
|
||||||
Click on "More Info" and select "Run Anyway." You trust us, right?
|
appropriate for your platform. It will be named `install.bat` on
|
||||||
|
Windows systems and `install.sh` on Linux and Macintosh
|
||||||
|
systems. Be aware that your system's file browser may suppress the
|
||||||
|
display of the file extension.
|
||||||
|
|
||||||
7. Alternatively, from the command line, run the shell script or .bat file:
|
On Windows systems you may get an "Untrusted Publisher" warning.
|
||||||
|
Click on "More Info" and then select "Run Anyway." You trust us, right?
|
||||||
|
|
||||||
|
6. **[Alternative] Launch the installer script from the command line**: Alternatively, from the command line, run the shell script or .bat file:
|
||||||
|
|
||||||
```cmd
|
```cmd
|
||||||
C:\Documents\Linco> cd InvokeAI-Installer
|
C:\Documents\Linco> cd InvokeAI-Installer
|
||||||
C:\Documents\Linco\invokeAI> install.bat
|
C:\Documents\Linco\invokeAI> .\install.bat
|
||||||
```
|
```
|
||||||
|
|
||||||
8. The script will ask you to choose where to install InvokeAI. Select a
|
7. **Select the location to install InvokeAI**: The script will ask you to choose where to install InvokeAI. Select a
|
||||||
directory with at least 18G of free space for a full install. InvokeAI and
|
directory with at least 18G of free space for a full install. InvokeAI and
|
||||||
all its support files will be installed into a new directory named
|
all its support files will be installed into a new directory named
|
||||||
`invokeai` located at the location you specify.
|
`invokeai` located at the location you specify.
|
||||||
|
|
||||||
|
<figure markdown>
|
||||||
|

|
||||||
|
</figure>
|
||||||
|
|
||||||
- The default is to install the `invokeai` directory in your home directory,
|
- The default is to install the `invokeai` directory in your home directory,
|
||||||
usually `C:\Users\YourName\invokeai` on Windows systems,
|
usually `C:\Users\YourName\invokeai` on Windows systems,
|
||||||
`/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai`
|
`/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai`
|
||||||
on Macintoshes, where "YourName" is your login name.
|
on Macintoshes, where "YourName" is your login name.
|
||||||
|
|
||||||
|
- If you have previously installed InvokeAI, you will be asked to
|
||||||
|
confirm whether you want to reinstall into this directory. You
|
||||||
|
may choose to reinstall, in which case your version will be upgraded,
|
||||||
|
or choose a different directory.
|
||||||
|
|
||||||
- The script uses tab autocompletion to suggest directory path completions.
|
- The script uses tab autocompletion to suggest directory path completions.
|
||||||
Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
|
Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
|
||||||
to suggest completions.
|
to suggest completions.
|
||||||
|
|
||||||
9. Sit back and let the install script work. It will install the third-party
|
8. **Select your GPU**: The installer will autodetect your platform and will request you to
|
||||||
libraries needed by InvokeAI, then download the current InvokeAI release and
|
confirm the type of GPU your graphics card has. On Linux systems,
|
||||||
install it.
|
you will have the choice of CUDA (NVidia cards), ROCm (AMD cards),
|
||||||
|
or CPU (no graphics acceleration). On Windows, you'll have the
|
||||||
|
choice of CUDA vs CPU, and on Macs you'll be offered CPU only. When
|
||||||
|
you select CPU on M1 or M2 Macintoshes, you will get MPS-based
|
||||||
|
graphics acceleration without installing additional drivers. If you
|
||||||
|
are unsure what GPU you are using, you can ask the installer to
|
||||||
|
guess.
|
||||||
|
|
||||||
|
9. **Watch it go!**: Sit back and let the install script work. It will install the third-party
|
||||||
|
libraries needed by InvokeAI and the application itself.
|
||||||
|
|
||||||
Be aware that some of the library download and install steps take a long
|
Be aware that some of the library download and install steps take a long
|
||||||
time. In particular, the `pytorch` package is quite large and often appears
|
time. In particular, the `pytorch` package is quite large and often appears
|
||||||
@ -176,25 +197,141 @@ version of InvokeAI with the option to upgrade to experimental versions later.
|
|||||||
minutes and nothing is happening, you can interrupt the script with ^C. You
|
minutes and nothing is happening, you can interrupt the script with ^C. You
|
||||||
may restart it and it will pick up where it left off.
|
may restart it and it will pick up where it left off.
|
||||||
|
|
||||||
10. After installation completes, the installer will launch the configuration script, which will guide you through the first-time process
|
<figure markdown>
|
||||||
of selecting one or more Stable Diffusion model weights files, downloading
|

|
||||||
and configuring them. We provide a list of popular models that InvokeAI
|
</figure>
|
||||||
performs well with. However, you can add more weight files later on using
|
|
||||||
the command-line client or the Web UI. See
|
|
||||||
[Installing Models](050_INSTALLING_MODELS.md) for details.
|
|
||||||
|
|
||||||
Note that the main Stable Diffusion weights file is protected by a license
|
10. **Post-install Configuration**: After installation completes, the
|
||||||
agreement that you must agree to in order to use. The script will list the
|
installer will launch the configuration form, which will guide you
|
||||||
steps you need to take to create an account on the official site that hosts
|
through the first-time process of adjusting some of InvokeAI's
|
||||||
the weights files, accept the agreement, and provide an access token that
|
startup settings. To move around this form use ctrl-N for
|
||||||
allows InvokeAI to legally download and install the weights files.
|
<N>ext and ctrl-P for <P>revious, or use <tab>
|
||||||
|
and shift-<tab> to move forward and back. Once you are in a
|
||||||
|
multi-checkbox field use the up and down cursor keys to select the
|
||||||
|
item you want, and <space> to toggle it on and off. Within
|
||||||
|
a directory field, pressing <tab> will provide autocomplete
|
||||||
|
options.
|
||||||
|
|
||||||
If you have already downloaded the weights file(s) for another Stable
|
Generally the defaults are fine, and you can come back to this screen at
|
||||||
Diffusion distribution, you may skip this step (by selecting "skip" when
|
any time to tweak your system. Here are the options you can adjust:
|
||||||
prompted) and configure InvokeAI to use the previously-downloaded files. The
|
|
||||||
process for this is described in [Installing Models](050_INSTALLING_MODELS.md).
|
|
||||||
|
|
||||||
11. The script will now exit and you'll be ready to generate some images. Look
|
- ***Output directory for images***
|
||||||
|
This is the path to a directory in which InvokeAI will store all its
|
||||||
|
generated images.
|
||||||
|
|
||||||
|
- ***NSFW checker***
|
||||||
|
If checked, InvokeAI will test images for potential sexual content
|
||||||
|
and blur them out if found. Note that the NSFW checker consumes
|
||||||
|
an additional 0.6 GB of VRAM on top of the 2-3 GB of VRAM used
|
||||||
|
by most image models. If you have a low VRAM GPU (4-6 GB), you
|
||||||
|
can reduce out of memory errors by disabling the checker.
|
||||||
|
|
||||||
|
- ***HuggingFace Access Token***
|
||||||
|
InvokeAI has the ability to download embedded styles and subjects
|
||||||
|
from the HuggingFace Concept Library on-demand. However, some of
|
||||||
|
the concept library files are password protected. To make download
|
||||||
|
smoother, you can set up an account at huggingface.co, obtain an
|
||||||
|
access token, and paste it into this field. Note that you paste
|
||||||
|
to this screen using ctrl-shift-V
|
||||||
|
|
||||||
|
- ***Free GPU memory after each generation***
|
||||||
|
This is useful for low-memory machines and helps minimize the
|
||||||
|
amount of GPU VRAM used by InvokeAI.
|
||||||
|
|
||||||
|
- ***Enable xformers support if available***
|
||||||
|
If the xformers library was successfully installed, this will activate
|
||||||
|
it to reduce memory consumption and increase rendering speed noticeably.
|
||||||
|
Note that xformers has the side effect of generating slightly different
|
||||||
|
images even when presented with the same seed and other settings.
|
||||||
|
|
||||||
|
- ***Force CPU to be used on GPU systems***
|
||||||
|
This will use the (slow) CPU rather than the accelerated GPU. This
|
||||||
|
can be used to generate images on systems that don't have a compatible
|
||||||
|
GPU.
|
||||||
|
|
||||||
|
- ***Precision***
|
||||||
|
This controls whether to use float32 or float16 arithmetic.
|
||||||
|
float16 uses less memory but is also slightly less accurate.
|
||||||
|
Ordinarily the right arithmetic is picked automatically ("auto"),
|
||||||
|
but you may have to use float32 to get images on certain systems
|
||||||
|
and graphics cards. The "autocast" option is deprecated and
|
||||||
|
shouldn't be used unless you are asked to by a member of the team.
|
||||||
|
|
||||||
|
- ***Number of models to cache in CPU memory***
|
||||||
|
This allows you to keep models in memory and switch rapidly among
|
||||||
|
them rather than having them load from disk each time. This slider
|
||||||
|
controls how many models to keep loaded at once. Each
|
||||||
|
model will use 2-4 GB of RAM, so use this cautiously
|
||||||
|
|
||||||
|
- ***Directory containing embedding/textual inversion files***
|
||||||
|
This is the directory in which you can place custom embedding
|
||||||
|
files (.pt or .bin). During startup, this directory will be
|
||||||
|
scanned and InvokeAI will print out the text terms that
|
||||||
|
are available to trigger the embeddings.
|
||||||
|
|
||||||
|
At the bottom of the screen you will see a checkbox for accepting
|
||||||
|
the CreativeML Responsible AI License. You need to accept the license
|
||||||
|
in order to download Stable Diffusion models from the next screen.
|
||||||
|
|
||||||
|
_You can come back to the startup options form_ as many times as you like.
|
||||||
|
From the `invoke.sh` or `invoke.bat` launcher, select option (6) to relaunch
|
||||||
|
this script. On the command line, it is named `invokeai-configure`.
|
||||||
|
|
||||||
|
11. **Downloading Models**: After you press `[NEXT]` on the screen, you will be taken
|
||||||
|
to another screen that prompts you to download a series of starter models. The ones
|
||||||
|
we recommend are preselected for you, but you are encouraged to use the checkboxes to
|
||||||
|
pick and choose.
|
||||||
|
You will probably wish to download `autoencoder-840000` for use with models that
|
||||||
|
were trained with an older version of the Stability VAE.
|
||||||
|
|
||||||
|
<figure markdown>
|
||||||
|

|
||||||
|
</figure>
|
||||||
|
|
||||||
|
Below the preselected list of starter models is a large text field which you can use
|
||||||
|
to specify a series of models to import. You can specify models in a variety of formats,
|
||||||
|
each separated by a space or newline. The formats accepted are:
|
||||||
|
|
||||||
|
- The path to a .ckpt or .safetensors file. On most systems, you can drag a file from
|
||||||
|
the file browser to the textfield to automatically paste the path. Be sure to remove
|
||||||
|
extraneous quotation marks and other things that come along for the ride.
|
||||||
|
|
||||||
|
- The path to a directory containing a combination of `.ckpt` and `.safetensors` files.
|
||||||
|
The directory will be scanned from top to bottom (including subfolders) and any
|
||||||
|
file that can be imported will be.
|
||||||
|
|
||||||
|
- A URL pointing to a `.ckpt` or `.safetensors` file. You can cut
|
||||||
|
and paste directly from a web page, or simply drag the link from the web page
|
||||||
|
or navigation bar. (You can also use ctrl-shift-V to paste into this field)
|
||||||
|
The file will be downloaded and installed.
|
||||||
|
|
||||||
|
- The HuggingFace repository ID (repo_id) for a `diffusers` model. These IDs have
|
||||||
|
the format _author_name/model_name_, as in `andite/anything-v4.0`
|
||||||
|
|
||||||
|
- The path to a local directory containing a `diffusers`
|
||||||
|
model. These directories always have the file `model_index.json`
|
||||||
|
at their top level.
|
||||||
|
|
||||||
|
_Select a directory for models to import_ You may select a local
|
||||||
|
directory for autoimporting at startup time. If you select this
|
||||||
|
option, the directory you choose will be scanned for new
|
||||||
|
.ckpt/.safetensors files each time InvokeAI starts up, and any new
|
||||||
|
files will be automatically imported and made available for your
|
||||||
|
use.
|
||||||
|
|
||||||
|
_Convert imported models into diffusers_ When legacy checkpoint
|
||||||
|
files are imported, you may select to use them unmodified (the
|
||||||
|
default) or to convert them into `diffusers` models. The latter
|
||||||
|
load much faster and have slightly better rendering performance,
|
||||||
|
but not all checkpoint files can be converted. Note that Stable Diffusion
|
||||||
|
Version 2.X files are **only** supported in `diffusers` format and will
|
||||||
|
be converted regardless.
|
||||||
|
|
||||||
|
_You can come back to the model install form_ as many times as you like.
|
||||||
|
From the `invoke.sh` or `invoke.bat` launcher, select option (5) to relaunch
|
||||||
|
this script. On the command line, it is named `invokeai-model-install`.
|
||||||
|
|
||||||
|
12. **Running InvokeAI for the first time**: The script will now exit and you'll be ready to generate some images. Look
|
||||||
for the directory `invokeai` installed in the location you chose at the
|
for the directory `invokeai` installed in the location you chose at the
|
||||||
beginning of the install session. Look for a shell script named `invoke.sh`
|
beginning of the install session. Look for a shell script named `invoke.sh`
|
||||||
(Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking
|
(Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking
|
||||||
@ -205,17 +342,17 @@ version of InvokeAI with the option to upgrade to experimental versions later.
|
|||||||
C:\Documents\Linco\invokeAI> invoke.bat
|
C:\Documents\Linco\invokeAI> invoke.bat
|
||||||
```
|
```
|
||||||
|
|
||||||
- The `invoke.bat` (`invoke.sh`) script will give you the choice of starting
|
- The `invoke.bat` (`invoke.sh`) script will give you the choice
|
||||||
(1) the command-line interface, or (2) the web GUI. If you start the
|
of starting (1) the command-line interface, (2) the web GUI, (3)
|
||||||
latter, you can load the user interface by pointing your browser at
|
textual inversion training, and (4) model merging.
|
||||||
http://localhost:9090.
|
|
||||||
|
|
||||||
- The script also offers you a third option labeled "open the developer
|
- By default, the script will launch the web interface. When you
|
||||||
console". If you choose this option, you will be dropped into a
|
do this, you'll see a series of startup messages ending with
|
||||||
command-line interface in which you can run python commands directly,
|
instructions to point your browser at
|
||||||
access developer tools, and launch InvokeAI with customized options.
|
http://localhost:9090. Click on this link to open up a browser
|
||||||
|
and start exploring InvokeAI's features.
|
||||||
|
|
||||||
12. You can launch InvokeAI with several different command-line arguments that
|
12. **InvokeAI Options**: You can launch InvokeAI with several different command-line arguments that
|
||||||
customize its behavior. For example, you can change the location of the
|
customize its behavior. For example, you can change the location of the
|
||||||
image output directory, or select your favorite sampler. See the
|
image output directory, or select your favorite sampler. See the
|
||||||
[Command-Line Interface](../features/CLI.md) for a full list of the options.
|
[Command-Line Interface](../features/CLI.md) for a full list of the options.
|
||||||
@ -225,29 +362,63 @@ version of InvokeAI with the option to upgrade to experimental versions later.
|
|||||||
`invokeai\invokeai.init`. It contains a variety of examples that you can
|
`invokeai\invokeai.init`. It contains a variety of examples that you can
|
||||||
follow to add and modify launch options.
|
follow to add and modify launch options.
|
||||||
|
|
||||||
!!! warning "The `invokeai` directory contains the `invokeai` application, its
|
- The launcher script also offers you an option labeled "open the developer
|
||||||
|
console". If you choose this option, you will be dropped into a
|
||||||
|
command-line interface in which you can run python commands directly,
|
||||||
|
access developer tools, and launch InvokeAI with customized options.
|
||||||
|
|
||||||
|
|
||||||
|
!!! warning "Do not move or remove the `invokeai` directory"
|
||||||
|
|
||||||
|
The `invokeai` directory contains the `invokeai` application, its
|
||||||
configuration files, the model weight files, and outputs of image generation.
|
configuration files, the model weight files, and outputs of image generation.
|
||||||
Once InvokeAI is installed, do not move or remove this directory."
|
Once InvokeAI is installed, do not move or remove this directory.
|
||||||
|
|
||||||
|
|
||||||
## Troubleshooting
|
## Troubleshooting
|
||||||
|
|
||||||
### _Package dependency conflicts_
|
### _Package dependency conflicts_
|
||||||
|
|
||||||
If you have previously installed InvokeAI or another Stable Diffusion package,
|
If you have previously installed InvokeAI or another Stable Diffusion
|
||||||
the installer may occasionally pick up outdated libraries and either the
|
package, the installer may occasionally pick up outdated libraries and
|
||||||
installer or `invoke` will fail with complaints about library conflicts. You can
|
either the installer or `invoke` will fail with complaints about
|
||||||
address this by entering the `invokeai` directory and running `update.sh`, which
|
library conflicts. In this case, run the `invoke.sh`/`invoke.bat`
|
||||||
will bring InvokeAI up to date with the latest libraries.
|
command and enter the Developer's Console by picking option (5). This
|
||||||
|
will take you to a command-line prompt.
|
||||||
|
|
||||||
### ldm from pypi
|
Then give this command:
|
||||||
|
|
||||||
!!! warning
|
`pip install InvokeAI --force-reinstall`
|
||||||
|
|
||||||
Some users have tried to correct dependency problems by installing
|
This should fix the issues.
|
||||||
the `ldm` package from PyPi.org. Unfortunately this is an unrelated package that
|
|
||||||
has nothing to do with the 'latent diffusion model' used by InvokeAI. Installing
|
### InvokeAI runs extremely slowly on Linux or Windows systems
|
||||||
ldm will make matters worse. If you've installed ldm, uninstall it with
|
|
||||||
`pip uninstall ldm`.
|
The most frequent cause of this problem is when the installation
|
||||||
|
process installed the CPU-only version of the torch machine-learning
|
||||||
|
library, rather than a version that takes advantage of GPU
|
||||||
|
acceleration. To confirm this issue, look at the InvokeAI startup
|
||||||
|
messages. If you see a message saying ">> Using device CPU", then
|
||||||
|
this is what happened.
|
||||||
|
|
||||||
|
To fix this problem, first determine whether you have an NVidia or an
|
||||||
|
AMD GPU. The former uses the CUDA driver, and the latter uses ROCm
|
||||||
|
(only available on Linux). Then run the `invoke.sh`/`invoke.bat`
|
||||||
|
command and enter the Developer's Console by picking option (5). This
|
||||||
|
will take you to a command-line prompt.
|
||||||
|
|
||||||
|
Then type the following commands:
|
||||||
|
|
||||||
|
=== "NVIDIA System"
|
||||||
|
```bash
|
||||||
|
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu117
|
||||||
|
pip install xformers
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "AMD System"
|
||||||
|
```bash
|
||||||
|
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
||||||
|
```
|
||||||
|
|
||||||
### Corrupted configuration file
|
### Corrupted configuration file
|
||||||
|
|
||||||
@ -272,7 +443,53 @@ the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
|
|||||||
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
|
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
|
||||||
assistance.
|
assistance.
|
||||||
|
|
||||||
### other problems
|
### Out of Memory Issues
|
||||||
|
|
||||||
|
The models are large, VRAM is expensive, and you may find yourself
|
||||||
|
faced with Out of Memory errors when generating images. Here are some
|
||||||
|
tips to reduce the problem:
|
||||||
|
|
||||||
|
* **4 GB of VRAM**
|
||||||
|
|
||||||
|
This should be adequate for 512x512 pixel images using Stable Diffusion 1.5
|
||||||
|
and derived models, provided that you **disable** the NSFW checker. To
|
||||||
|
disable the filter, do one of the following:
|
||||||
|
|
||||||
|
* Select option (6) "_change InvokeAI startup options_" from the
|
||||||
|
launcher. This will bring up the console-based startup settings
|
||||||
|
dialogue and allow you to unselect the "NSFW Checker" option.
|
||||||
|
* Start the startup settings dialogue directly by running
|
||||||
|
`invokeai-configure --skip-sd-weights --skip-support-models`
|
||||||
|
from the command line.
|
||||||
|
* Find the `invokeai.init` initialization file in the InvokeAI root
|
||||||
|
directory, open it in a text editor, and change `--nsfw_checker`
|
||||||
|
to `--no-nsfw_checker`
|
||||||
|
|
||||||
|
If you are on a CUDA system, you can realize significant memory
|
||||||
|
savings by activating the `xformers` library as described above. The
|
||||||
|
downside is `xformers` introduces non-deterministic behavior, such
|
||||||
|
that images generated with exactly the same prompt and settings will
|
||||||
|
be slightly different from each other. See above for more information.
|
||||||
|
|
||||||
|
* **6 GB of VRAM**
|
||||||
|
|
||||||
|
This is a border case. Using the SD 1.5 series you should be able to
|
||||||
|
generate images up to 640x640 with the NSFW checker enabled, and up to
|
||||||
|
1024x1024 with it disabled and `xformers` activated.
|
||||||
|
|
||||||
|
If you run into persistent memory issues there are a series of
|
||||||
|
environment variables that you can set before launching InvokeAI that
|
||||||
|
alter how the PyTorch machine learning library manages memory. See
|
||||||
|
https://pytorch.org/docs/stable/notes/cuda.html#memory-management for
|
||||||
|
a list of these tweaks.
|
||||||
|
|
||||||
|
* **12 GB of VRAM**
|
||||||
|
|
||||||
|
This should be sufficient to generate larger images up to about
|
||||||
|
1280x1280. If you wish to push further, consider activating
|
||||||
|
`xformers`.
|
||||||
|
|
||||||
|
### Other Problems
|
||||||
|
|
||||||
If you run into problems during or after installation, the InvokeAI team is
|
If you run into problems during or after installation, the InvokeAI team is
|
||||||
available to help you. Either create an
|
available to help you. Either create an
|
||||||
@ -284,36 +501,20 @@ hours, and often much sooner.
|
|||||||
|
|
||||||
## Updating to newer versions
|
## Updating to newer versions
|
||||||
|
|
||||||
This distribution is changing rapidly, and we add new features on a daily basis.
|
This distribution is changing rapidly, and we add new features
|
||||||
To update to the latest released version (recommended), run the `update.sh`
|
regularly. Releases are announced at
|
||||||
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
|
http://github.com/invoke-ai/InvokeAI/releases, and at
|
||||||
release and re-run the `invokeai-configure` script to download any updated
|
https://pypi.org/project/InvokeAI/. To update to the latest released
|
||||||
models files that may be needed. You can also use this to add additional models
|
version (recommended), follow these steps:
|
||||||
that you did not select at installation time.
|
|
||||||
|
|
||||||
You can now close the developer console and run `invoke` as before. If you get
|
1. Start the `invoke.sh`/`invoke.bat` launch script from within the
|
||||||
complaints about missing models, then you may need to do the additional step of
|
`invokeai` root directory.
|
||||||
running `invokeai-configure`. This happens relatively infrequently. To do
|
|
||||||
this, simply open up the developer's console again and type
|
|
||||||
`invokeai-configure`.
|
|
||||||
|
|
||||||
You may also use the `update` script to install any selected version of
|
2. Choose menu item (10) "Update InvokeAI".
|
||||||
InvokeAI. From https://github.com/invoke-ai/InvokeAI, navigate to the zip file
|
|
||||||
link of the version you wish to install. You can find the zip links by going to
|
|
||||||
the one of the release pages and looking for the **Assets** section at the
|
|
||||||
bottom. Alternatively, you can browse "branches" and "tags" at the top of the
|
|
||||||
big code directory on the InvokeAI welcome page. When you find the version you
|
|
||||||
want to install, go to the green "<> Code" button at the top, and copy the
|
|
||||||
"Download ZIP" link.
|
|
||||||
|
|
||||||
Now run `update.sh` (or `update.bat`) with the version number of the desired InvokeAI
|
3. This will launch a menu that gives you the option of:
|
||||||
version as its argument. For example, this will install the old 2.2.0 release.
|
|
||||||
|
|
||||||
```cmd
|
1. Updating to the latest official release;
|
||||||
update.sh v2.2.0
|
2. Updating to the bleeding-edge development version; or
|
||||||
```
|
3. Manually entering the tag or branch name of a version of
|
||||||
|
InvokeAI you wish to try out.
|
||||||
You can get the list of version numbers by going to the [releases
|
|
||||||
page](https://github.com/invoke-ai/InvokeAI/releases) or by browsing
|
|
||||||
the [Tags](https://github.com/invoke-ai/InvokeAI/tags) list from the
|
|
||||||
Code section of the main github page.
|
|
||||||
|
@ -14,24 +14,56 @@ title: Installing Manually
|
|||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
|
||||||
You have two choices for manual installation. The [first one](#pip-Install) uses
|
!!! tip "Conda"
|
||||||
basic Python virtual environment (`venv`) command and `pip` package manager. The
|
As of InvokeAI v2.3.0 installation using the `conda` package manager is no longer being supported. It will likely still work, but we are not testing this installation method.
|
||||||
[second one](#Conda-method) uses Anaconda3 package manager (`conda`). Both
|
|
||||||
methods require you to enter commands on the terminal, also known as the
|
|
||||||
"console".
|
|
||||||
|
|
||||||
Note that the `conda` installation method is currently deprecated and will not
|
|
||||||
be supported at some point in the future.
|
|
||||||
|
|
||||||
On Windows systems, you are encouraged to install and use the
|
On Windows systems, you are encouraged to install and use the
|
||||||
[PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
|
[PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
|
||||||
which provides compatibility with Linux and Mac shells and nice features such as
|
which provides compatibility with Linux and Mac shells and nice
|
||||||
command-line completion.
|
features such as command-line completion.
|
||||||
|
|
||||||
## pip Install
|
### Prerequisites
|
||||||
|
|
||||||
To install InvokeAI with virtual environments and the PIP package manager,
|
Before you start, make sure you have the following prerequisites
|
||||||
please follow these steps:
|
installed. These are described in more detail in [Automated
|
||||||
|
Installation](010_INSTALL_AUTOMATED.md), and in many cases will
|
||||||
|
already be installed (if, for example, you have used your system for
|
||||||
|
gaming):
|
||||||
|
|
||||||
|
* **Python**
|
||||||
|
|
||||||
|
version 3.9 or 3.10 (3.11 is not recommended).
|
||||||
|
|
||||||
|
* **CUDA Tools**
|
||||||
|
|
||||||
|
For those with _NVidia GPUs_, you will need to
|
||||||
|
install the [CUDA toolkit and optionally the XFormers library](070_INSTALL_XFORMERS.md).
|
||||||
|
|
||||||
|
* **ROCm Tools**
|
||||||
|
|
||||||
|
For _Linux users with AMD GPUs_, you will need
|
||||||
|
to install the [ROCm toolkit](./030_INSTALL_CUDA_AND_ROCM.md). Note that
|
||||||
|
InvokeAI does not support AMD GPUs on Windows systems due to
|
||||||
|
lack of a Windows ROCm library.
|
||||||
|
|
||||||
|
* **Visual C++ Libraries**
|
||||||
|
|
||||||
|
_Windows users_ must install the free
|
||||||
|
[Visual C++ libraries from Microsoft](https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170)
|
||||||
|
|
||||||
|
* **The Xcode command line tools**
|
||||||
|
|
||||||
|
for _Macintosh users_. Instructions are available at
|
||||||
|
[Free Code Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
|
||||||
|
|
||||||
|
* _Macintosh users_ may also need to run the `Install Certificates` command
|
||||||
|
if model downloads give lots of certificate errors. Run:
|
||||||
|
`/Applications/Python\ 3.10/Install\ Certificates.command`
|
||||||
|
|
||||||
|
### Installation Walkthrough
|
||||||
|
|
||||||
|
To install InvokeAI with virtual environments and the PIP package
|
||||||
|
manager, please follow these steps:
|
||||||
|
|
||||||
1. Please make sure you are using Python 3.9 or 3.10. The rest of the install
|
1. Please make sure you are using Python 3.9 or 3.10. The rest of the install
|
||||||
procedure depends on this and will not work with other versions:
|
procedure depends on this and will not work with other versions:
|
||||||
@ -40,54 +72,127 @@ please follow these steps:
|
|||||||
python -V
|
python -V
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from
|
2. Create a directory to contain your InvokeAI library, configuration
|
||||||
GitHub:
|
files, and models. This is known as the "runtime" or "root"
|
||||||
|
directory, and often lives in your home directory under the name `invokeai`.
|
||||||
|
|
||||||
|
Please keep in mind the disk space requirements - you will need at
|
||||||
|
least 20GB for the models and the virtual environment. From now
|
||||||
|
on we will refer to this directory as `INVOKEAI_ROOT`. For convenience,
|
||||||
|
the steps below create a shell variable of that name which contains the
|
||||||
|
path to `HOME/invokeai`.
|
||||||
|
|
||||||
|
=== "Linux/Mac"
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
git clone https://github.com/invoke-ai/InvokeAI.git
|
export INVOKEAI_ROOT=~/invokeai
|
||||||
|
mkdir $INVOKEAI_ROOT
|
||||||
```
|
```
|
||||||
|
|
||||||
This will create InvokeAI folder where you will follow the rest of the
|
=== "Windows (Powershell)"
|
||||||
steps.
|
|
||||||
|
```bash
|
||||||
3. From within the InvokeAI top-level directory, create and activate a virtual
|
Set-Variable -Name INVOKEAI_ROOT -Value $Home/invokeai
|
||||||
environment named `.venv` and prompt displaying `InvokeAI`:
|
mkdir $INVOKEAI_ROOT
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Enter the root (invokeai) directory and create a virtual Python
|
||||||
|
environment within it named `.venv`. If the command `python`
|
||||||
|
doesn't work, try `python3`. Note that while you may create the
|
||||||
|
virtual environment anywhere in the file system, we recommend that
|
||||||
|
you create it within the root directory as shown here. This makes
|
||||||
|
it possible for the InvokeAI applications to find the model data
|
||||||
|
and configuration. If you do not choose to install the virtual
|
||||||
|
environment inside the root directory, then you **must** set the
|
||||||
|
`INVOKEAI_ROOT` environment variable in your shell environment, for
|
||||||
|
example, by editing `~/.bashrc` or `~/.zshrc` files, or setting the
|
||||||
|
Windows environment variable using the Advanced System Settings dialogue.
|
||||||
|
Refer to your operating system documentation for details.
|
||||||
|
|
||||||
|
```terminal
|
||||||
|
cd $INVOKEAI_ROOT
|
||||||
|
python -m venv .venv --prompt InvokeAI
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Activate the new environment:
|
||||||
|
|
||||||
|
=== "Linux/Mac"
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
python -m venv .venv \
|
|
||||||
--prompt InvokeAI \
|
|
||||||
--upgrade-deps
|
|
||||||
source .venv/bin/activate
|
source .venv/bin/activate
|
||||||
```
|
```
|
||||||
|
|
||||||
4. Make sure that pip is installed in your virtual environment an up to date:
|
=== "Windows"
|
||||||
|
|
||||||
```bash
|
```ps
|
||||||
python -m ensurepip \
|
.venv\Scripts\activate
|
||||||
--upgrade
|
|
||||||
python -m pip install \
|
|
||||||
--upgrade pip
|
|
||||||
```
|
```
|
||||||
|
|
||||||
5. Install Package
|
If you get a permissions error at this point, run this command and try again
|
||||||
|
|
||||||
|
`Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser`
|
||||||
|
|
||||||
|
The command-line prompt should change to show `(InvokeAI)` at the
|
||||||
|
beginning of the prompt. Note that all the following steps should be
|
||||||
|
run while inside the INVOKEAI_ROOT directory
|
||||||
|
|
||||||
|
5. Make sure that pip is installed in your virtual environment and up to date:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install --use-pep517 .
|
python -m pip install --upgrade pip
|
||||||
```
|
```
|
||||||
|
|
||||||
6. Set up the runtime directory
|
6. Install the InvokeAI Package. The `--extra-index-url` option is used to select among
|
||||||
|
CUDA, ROCm and CPU/MPS drivers as shown below:
|
||||||
|
|
||||||
In this step you will initialize a runtime directory that will contain the
|
=== "CUDA (NVidia)"
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "ROCm (AMD)"
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "CPU (Intel Macs & non-GPU systems)"
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "MPS (M1 and M2 Macs)"
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install InvokeAI --use-pep517
|
||||||
|
```
|
||||||
|
|
||||||
|
7. Deactivate and reactivate your runtime directory so that the invokeai-specific commands
|
||||||
|
become available in the environment
|
||||||
|
|
||||||
|
=== "Linux/Macintosh"
|
||||||
|
|
||||||
|
```bash
|
||||||
|
deactivate && source .venv/bin/activate
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Windows"
|
||||||
|
|
||||||
|
```ps
|
||||||
|
deactivate
|
||||||
|
.venv\Scripts\activate
|
||||||
|
```
|
||||||
|
|
||||||
|
8. Set up the runtime directory
|
||||||
|
|
||||||
|
In this step you will initialize your runtime directory with the downloaded
|
||||||
models, model config files, directory for textual inversion embeddings, and
|
models, model config files, directory for textual inversion embeddings, and
|
||||||
your outputs. This keeps the runtime directory separate from the source code
|
your outputs.
|
||||||
and aids in updating.
|
|
||||||
|
|
||||||
You may pick any location for this directory using the `--root_dir` option
|
```terminal
|
||||||
(abbreviated --root). If you don't pass this option, it will default to
|
invokeai-configure
|
||||||
`~/invokeai`.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
invokeai-configure --root_dir ~/Programs/invokeai
|
|
||||||
```
|
```
|
||||||
|
|
||||||
The script `invokeai-configure` will interactively guide you through the
|
The script `invokeai-configure` will interactively guide you through the
|
||||||
@ -101,45 +206,41 @@ please follow these steps:
|
|||||||
If you get an error message about a module not being installed, check that
|
If you get an error message about a module not being installed, check that
|
||||||
the `invokeai` environment is active and if not, repeat step 5.
|
the `invokeai` environment is active and if not, repeat step 5.
|
||||||
|
|
||||||
Note that `invokeai-configure` and `invokeai` should be installed under your
|
|
||||||
virtual environment directory and the system should find them on the PATH.
|
|
||||||
If this isn't working on your system, you can call the scripts directory
|
|
||||||
using `python scripts/configure_invokeai.py` and `python scripts/invoke.py`.
|
|
||||||
|
|
||||||
!!! tip
|
!!! tip
|
||||||
|
|
||||||
If you have already downloaded the weights file(s) for another Stable
|
If you have already downloaded the weights file(s) for another Stable
|
||||||
Diffusion distribution, you may skip this step (by selecting "skip" when
|
Diffusion distribution, you may skip this step (by selecting "skip" when
|
||||||
prompted) and configure InvokeAI to use the previously-downloaded files. The
|
prompted) and configure InvokeAI to use the previously-downloaded files. The
|
||||||
process for this is described in [here](050_INSTALLING_MODELS.md).
|
process for this is described in [Installing Models](050_INSTALLING_MODELS.md).
|
||||||
|
|
||||||
7. Run the command-line- or the web- interface:
|
9. Run the command-line- or the web- interface:
|
||||||
|
|
||||||
Activate the environment (with `source .venv/bin/activate`), and then run
|
From within INVOKEAI_ROOT, activate the environment
|
||||||
the script `invokeai`. If you selected a non-default location for the
|
(with `source .venv/bin/activate` or `.venv\scripts\activate`), and then run
|
||||||
runtime directory, please specify the path with the `--root_dir` option
|
the script `invokeai`. If the virtual environment you selected is NOT inside
|
||||||
(abbreviated below as `--root`):
|
INVOKEAI_ROOT, then you must specify the path to the root directory by adding
|
||||||
|
`--root_dir \path\to\invokeai` to the commands below:
|
||||||
|
|
||||||
!!! example ""
|
!!! example ""
|
||||||
|
|
||||||
!!! warning "Make sure that the virtual environment is activated, which should create `(invokeai)` in front of your prompt!"
|
!!! warning "Make sure that the virtual environment is activated, which should create `(.venv)` in front of your prompt!"
|
||||||
|
|
||||||
=== "CLI"
|
=== "CLI"
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
invoke.py --root ~/Programs/invokeai
|
invokeai
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "local Webserver"
|
=== "local Webserver"
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
invoke.py --web --root ~/Programs/invokeai
|
invokeai --web
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "Public Webserver"
|
=== "Public Webserver"
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
invoke.py --web --host 0.0.0.0 --root ~/Programs/invokeai
|
invokeai --web --host 0.0.0.0
|
||||||
```
|
```
|
||||||
|
|
||||||
If you choose to run the web interface, point your browser at
|
If you choose to run the web interface, point your browser at
|
||||||
@ -147,54 +248,122 @@ please follow these steps:
|
|||||||
|
|
||||||
!!! tip
|
!!! tip
|
||||||
|
|
||||||
You can permanently set the location of the runtime directory by setting the environment variable INVOKEAI_ROOT to the path of the directory.
|
You can permanently set the location of the runtime directory
|
||||||
|
by setting the environment variable `INVOKEAI_ROOT` to the
|
||||||
|
path of the directory. As mentioned previously, this is
|
||||||
|
**highly recommended** if your virtual environment is located outside of
|
||||||
|
your runtime directory.
|
||||||
|
|
||||||
8. Render away!
|
10. Render away!
|
||||||
|
|
||||||
Browse the [features](../features/CLI.md) section to learn about all the
|
Browse the [features](../features/CLI.md) section to learn about all the
|
||||||
things you can do with InvokeAI.
|
things you can do with InvokeAI.
|
||||||
|
|
||||||
Note that some GPUs are slow to warm up. In particular, when using an AMD
|
|
||||||
card with the ROCm driver, you may have to wait for over a minute the first
|
|
||||||
time you try to generate an image. Fortunately, after the warm-up period
|
|
||||||
rendering will be fast.
|
|
||||||
|
|
||||||
9. Subsequently, to relaunch the script, activate the virtual environment, and
|
11. Subsequently, to relaunch the script, activate the virtual environment, and
|
||||||
then launch `invokeai` command. If you forget to activate the virtual
|
then launch `invokeai` command. If you forget to activate the virtual
|
||||||
environment you will most likely receive a `command not found` error.
|
environment you will most likely receive a `command not found` error.
|
||||||
|
|
||||||
!!! tip
|
!!! warning
|
||||||
|
|
||||||
Do not move the source code repository after installation. The virtual environment directory has absolute paths in it that get confused if the directory is moved.
|
Do not move the runtime directory after installation. The virtual environment will get confused if the directory is moved.
|
||||||
|
|
||||||
## Creating an "install" version of InvokeAI
|
12. Other scripts
|
||||||
|
|
||||||
If you wish you can install InvokeAI and all its dependencies in the runtime
|
The [Textual Inversion](../features/TEXTUAL_INVERSION.md) script can be launched with the command:
|
||||||
directory. This allows you to delete the source code repository and eliminates
|
|
||||||
the need to provide `--root_dir` at startup time. Note that this method only
|
|
||||||
works with the PIP method.
|
|
||||||
|
|
||||||
1. Follow the instructions for the PIP install, but in step #2 put the virtual
|
|
||||||
environment into the runtime directory. For example, assuming the runtime
|
|
||||||
directory lives in `~/Programs/invokeai`, you'd run:
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
python -m venv ~/Programs/invokeai
|
invokeai-ti --gui
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Now follow steps 3 to 5 in the PIP recipe, ending with the `pip install`
|
Similarly, the [Model Merging](../features/MODEL_MERGING.md) script can be launched with the command:
|
||||||
step.
|
|
||||||
|
|
||||||
3. Run one additional step while you are in the source code repository directory
|
```bash
|
||||||
|
invokeai-merge --gui
|
||||||
```
|
|
||||||
pip install --use-pep517 . # note the dot in the end!!!
|
|
||||||
```
|
```
|
||||||
|
|
||||||
4. That's all! Now, whenever you activate the virtual environment, `invokeai`
|
Leave off the `--gui` option to run the script using command-line arguments. Pass the `--help` argument
|
||||||
will know where to look for the runtime directory without needing a
|
to get usage instructions.
|
||||||
`--root_dir` argument. In addition, you can now move or delete the source
|
|
||||||
code repository entirely.
|
|
||||||
|
|
||||||
(Don't move the runtime directory!)
|
### Developer Install
|
||||||
|
|
||||||
|
If you have an interest in how InvokeAI works, or you would like to
|
||||||
|
add features or bugfixes, you are encouraged to install the source
|
||||||
|
code for InvokeAI. For this to work, you will need to install the
|
||||||
|
`git` source code management program. If it is not already installed
|
||||||
|
on your system, please see the [Git Installation
|
||||||
|
Guide](https://github.com/git-guides/install-git)
|
||||||
|
|
||||||
|
1. From the command line, run this command:
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/invoke-ai/InvokeAI.git
|
||||||
|
```
|
||||||
|
|
||||||
|
This will create a directory named `InvokeAI` and populate it with the
|
||||||
|
full source code from the InvokeAI repository.
|
||||||
|
|
||||||
|
2. Activate the InvokeAI virtual environment as per step (4) of the manual
|
||||||
|
installation protocol (important!)
|
||||||
|
|
||||||
|
3. Enter the InvokeAI repository directory and run one of these
|
||||||
|
commands, based on your GPU:
|
||||||
|
|
||||||
|
=== "CUDA (NVidia)"
|
||||||
|
```bash
|
||||||
|
pip install -e .[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "ROCm (AMD)"
|
||||||
|
```bash
|
||||||
|
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "CPU (Intel Macs & non-GPU systems)"
|
||||||
|
```bash
|
||||||
|
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "MPS (M1 and M2 Macs)"
|
||||||
|
```bash
|
||||||
|
pip install -e . --use-pep517
|
||||||
|
```
|
||||||
|
|
||||||
|
Be sure to pass `-e` (for an editable install) and don't forget the
|
||||||
|
dot ("."). It is part of the command.
|
||||||
|
|
||||||
|
You can now run `invokeai` and its related commands. The code will be
|
||||||
|
read from the repository, so that you can edit the .py source files
|
||||||
|
and watch the code's behavior change.
|
||||||
|
|
||||||
|
4. If you wish to contribute to the InvokeAI project, you are
|
||||||
|
encouraged to establish a GitHub account and "fork"
|
||||||
|
https://github.com/invoke-ai/InvokeAI into your own copy of the
|
||||||
|
repository. You can then use GitHub functions to create and submit
|
||||||
|
pull requests to contribute improvements to the project.
|
||||||
|
|
||||||
|
Please see [Contributing](../index.md#contributing) for hints
|
||||||
|
on getting started.
|
||||||
|
|
||||||
|
### Unsupported Conda Install
|
||||||
|
|
||||||
|
Congratulations, you found the "secret" Conda installation
|
||||||
|
instructions. If you really **really** want to use Conda with InvokeAI
|
||||||
|
you can do so using this unsupported recipe:
|
||||||
|
|
||||||
|
```
|
||||||
|
mkdir ~/invokeai
|
||||||
|
conda create -n invokeai python=3.10
|
||||||
|
conda activate invokeai
|
||||||
|
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
|
||||||
|
invokeai-configure --root ~/invokeai
|
||||||
|
invokeai --root ~/invokeai --web
|
||||||
|
```
|
||||||
|
|
||||||
|
The `pip install` command shown in this recipe is for Linux/Windows
|
||||||
|
systems with an NVIDIA GPU. See step (6) above for the command to use
|
||||||
|
with other platforms/GPU combinations. If you don't wish to pass the
|
||||||
|
`--root` argument to `invokeai` with each launch, you may set the
|
||||||
|
environment variable INVOKEAI_ROOT to point to the installation directory.
|
||||||
|
|
||||||
|
Note that if you run into problems with the Conda installation, the InvokeAI
|
||||||
|
staff will **not** be able to help you out. Caveat Emptor!
|
||||||
|
125
docs/installation/030_INSTALL_CUDA_AND_ROCM.md
Normal file
@ -0,0 +1,125 @@
|
|||||||
|
---
|
||||||
|
title: NVIDIA Cuda / AMD ROCm
|
||||||
|
---
|
||||||
|
|
||||||
|
<figure markdown>
|
||||||
|
|
||||||
|
# :simple-nvidia: CUDA | :simple-amd: ROCm
|
||||||
|
|
||||||
|
</figure>
|
||||||
|
|
||||||
|
In order for InvokeAI to run at full speed, you will need a graphics
|
||||||
|
card with a supported GPU. InvokeAI supports NVidia cards via the CUDA
|
||||||
|
driver on Windows and Linux, and AMD cards via the ROCm driver on Linux.
|
||||||
|
|
||||||
|
## :simple-nvidia: CUDA
|
||||||
|
|
||||||
|
### Linux and Windows Install
|
||||||
|
|
||||||
|
If you have used your system for other graphics-intensive tasks, such
|
||||||
|
as gaming, you may very well already have the CUDA drivers
|
||||||
|
installed. To confirm, open up a command-line window and type:
|
||||||
|
|
||||||
|
```
|
||||||
|
nvidia-smi
|
||||||
|
```
|
||||||
|
|
||||||
|
If this command produces a status report on the GPU(s) installed on
|
||||||
|
your system, CUDA is installed and you have no more work to do. If
|
||||||
|
instead you get "command not found", or similar, then the driver will
|
||||||
|
need to be installed.
|
||||||
|
|
||||||
|
We strongly recommend that you install the CUDA Toolkit package
|
||||||
|
directly from NVIDIA. **Do not try to install Ubuntu's
|
||||||
|
nvidia-cuda-toolkit package. It is out of date and will cause
|
||||||
|
conflicts among the NVIDIA driver and binaries.**
|
||||||
|
|
||||||
|
Go to [CUDA Toolkit 11.7
|
||||||
|
Downloads](https://developer.nvidia.com/cuda-11-7-0-download-archive),
|
||||||
|
and use the target selection wizard to choose your operating system,
|
||||||
|
hardware platform, and preferred installation method (e.g. "local"
|
||||||
|
versus "network").
|
||||||
|
|
||||||
|
This will provide you with a downloadable install file or, depending
|
||||||
|
on your choices, a recipe for downloading and running a install shell
|
||||||
|
script. Be sure to read and follow the full installation instructions.
|
||||||
|
|
||||||
|
After an install that seems successful, you can confirm by again
|
||||||
|
running `nvidia-smi` from the command line.
|
||||||
|
|
||||||
|
### Linux Install with a Runtime Container
|
||||||
|
|
||||||
|
On Linux systems, an alternative to installing CUDA Toolkit directly on
|
||||||
|
your system is to run an NVIDIA software container that has the CUDA
|
||||||
|
libraries already in place. This is recommended if you are already
|
||||||
|
familiar with containerization technologies such as Docker.
|
||||||
|
|
||||||
|
For downloads and instructions, visit the [NVIDIA CUDA Container
|
||||||
|
Runtime Site](https://developer.nvidia.com/nvidia-container-runtime)
|
||||||
|
|
||||||
|
### Torch Installation
|
||||||
|
|
||||||
|
When installing torch and torchvision manually with `pip`, remember to provide
|
||||||
|
the argument `--extra-index-url
|
||||||
|
https://download.pytorch.org/whl/cu117` as described in the [Manual
|
||||||
|
Installation Guide](020_INSTALL_MANUAL.md).
|
||||||
|
|
||||||
|
## :simple-amd: ROCm
|
||||||
|
|
||||||
|
### Linux Install
|
||||||
|
|
||||||
|
AMD GPUs are only supported on Linux platforms due to the lack of a
|
||||||
|
Windows ROCm driver at the current time. Also be aware that support
|
||||||
|
for newer AMD GPUs is spotty. Your mileage may vary.
|
||||||
|
|
||||||
|
It is possible that the ROCm driver is already installed on your
|
||||||
|
machine. To test, open up a terminal window and issue the following
|
||||||
|
command:
|
||||||
|
|
||||||
|
```
|
||||||
|
rocm-smi
|
||||||
|
```
|
||||||
|
|
||||||
|
If you get a table labeled "ROCm System Management Interface" the
|
||||||
|
driver is installed and you are done. If you get "command not found,"
|
||||||
|
then the driver needs to be installed.
|
||||||
|
|
||||||
|
Go to AMD's [ROCm Downloads
|
||||||
|
Guide](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation_new.html#installation-methods)
|
||||||
|
and scroll to the _Installation Methods_ section. Find the subsection
|
||||||
|
for the install method for your preferred Linux distribution, and
|
||||||
|
issue the commands given in the recipe.
|
||||||
|
|
||||||
|
Annoyingly, the official AMD site does not have a recipe for the most
|
||||||
|
recent version of Ubuntu, 22.04. However, this [community-contributed
|
||||||
|
recipe](https://novaspirit.github.io/amdgpu-rocm-ubu22/) is reported
|
||||||
|
to work well.
|
||||||
|
|
||||||
|
After installation, please run `rocm-smi` a second time to confirm
|
||||||
|
that the driver is present and the GPU is recognized. You may need to
|
||||||
|
do a reboot in order to load the driver.
|
||||||
|
|
||||||
|
### Linux Install with a ROCm-docker Container
|
||||||
|
|
||||||
|
If you are comfortable with the Docker containerization system, then
|
||||||
|
you can build a ROCm docker file. The source code and installation
|
||||||
|
recipes are available
|
||||||
|
[Here](https://github.com/RadeonOpenCompute/ROCm-docker/blob/master/quick-start.md)
|
||||||
|
|
||||||
|
### Torch Installation
|
||||||
|
|
||||||
|
When installing torch and torchvision manually with `pip`, remember to provide
|
||||||
|
the argument `--extra-index-url
|
||||||
|
https://download.pytorch.org/whl/rocm5.4.2` as described in the [Manual
|
||||||
|
Installation Guide](020_INSTALL_MANUAL.md).
|
||||||
|
|
||||||
|
This will be done automatically for you if you use the installer
|
||||||
|
script.
|
||||||
|
|
||||||
|
Be aware that the torch machine learning library does not seamlessly
|
||||||
|
interoperate with all AMD GPUs and you may experience garbled images,
|
||||||
|
black images, or long startup delays before rendering commences. Most
|
||||||
|
of these issues can be solved by Googling for workarounds. If you have
|
||||||
|
a problem and find a solution, please post an
|
||||||
|
[Issue](https://github.com/invoke-ai/InvokeAI/issues) so that other
|
||||||
|
users benefit and we can update this document.
|
@ -16,10 +16,6 @@ title: Installing with Docker
|
|||||||
|
|
||||||
For general use, install locally to leverage your machine's GPU.
|
For general use, install locally to leverage your machine's GPU.
|
||||||
|
|
||||||
!!! tip "For running on a cloud instance/service"
|
|
||||||
|
|
||||||
Check out the [Running InvokeAI in the cloud with Docker](#running-invokeai-in-the-cloud-with-docker) section below
|
|
||||||
|
|
||||||
## Why containers?
|
## Why containers?
|
||||||
|
|
||||||
They provide a flexible, reliable way to build and deploy InvokeAI. You'll also
|
They provide a flexible, reliable way to build and deploy InvokeAI. You'll also
|
||||||
@ -78,38 +74,40 @@ Some Suggestions of variables you may want to change besides the Token:
|
|||||||
|
|
||||||
<figure markdown>
|
<figure markdown>
|
||||||
|
|
||||||
| Environment-Variable | Default value | Description |
|
| Environment-Variable <img width="220" align="right"/> | Default value <img width="360" align="right"/> | Description |
|
||||||
| -------------------- | ----------------------------- | -------------------------------------------------------------------------------------------- |
|
| ----------------------------------------------------- | ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| `HUGGINGFACE_TOKEN` | No default, but **required**! | This is the only **required** variable, without it you can't download the huggingface models |
|
| `HUGGING_FACE_HUB_TOKEN` | No default, but **required**! | This is the only **required** variable, without it you can't download the huggingface models |
|
||||||
| `REPOSITORY_NAME` | The Basename of the Repo folder | This name will be used as the container repository/image name |
|
| `REPOSITORY_NAME` | The Basename of the Repo folder | This name will be used as the container repository/image name |
|
||||||
| `VOLUMENAME` | `${REPOSITORY_NAME,,}_data` | Name of the Docker Volume where model files will be stored |
|
| `VOLUMENAME` | `${REPOSITORY_NAME,,}_data` | Name of the Docker Volume where model files will be stored |
|
||||||
| `ARCH` | arch of the build machine | can be changed if you want to build the image for another arch |
|
| `ARCH` | arch of the build machine | Can be changed if you want to build the image for another arch |
|
||||||
| `INVOKEAI_TAG` | latest | the Container Repository / Tag which will be used |
|
| `CONTAINER_REGISTRY` | ghcr.io | Name of the Container Registry to use for the full tag |
|
||||||
| `PIP_REQUIREMENTS` | `requirements-lin-cuda.txt` | the requirements file to use (from `environments-and-requirements`) |
|
| `CONTAINER_REPOSITORY` | `$(whoami)/${REPOSITORY_NAME}` | Name of the Container Repository |
|
||||||
| `CONTAINER_FLAVOR` | cuda | the flavor of the image, which can be changed if you build f.e. with amd requirements file. |
|
| `CONTAINER_FLAVOR` | `cuda` | The flavor of the image to be built; available options are `cuda`, `rocm` and `cpu`. If you choose `rocm` or `cpu`, the extra-index-url will be selected automatically, unless you set one yourself. |
|
||||||
| `INVOKE_DOCKERFILE` | `docker-build/Dockerfile` | the Dockerfile which should be built, handy for development |
|
| `CONTAINER_TAG` | `${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}` | The Container Repository / Tag which will be used |
|
||||||
|
| `INVOKE_DOCKERFILE` | `Dockerfile` | The Dockerfile which should be built, handy for development |
|
||||||
|
| `PIP_EXTRA_INDEX_URL` | | If you want to use a custom pip-extra-index-url |
|
||||||
|
|
||||||
</figure>
|
</figure>
|
||||||
|
|
||||||
#### Build the Image
|
#### Build the Image
|
||||||
|
|
||||||
I provided a build script, which is located in `docker-build/build.sh` but still
|
I provided a build script, which is located next to the Dockerfile in
|
||||||
needs to be executed from the Repository root.
|
`docker/build.sh`. It can be executed from repository root like this:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./docker-build/build.sh
|
./docker/build.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
The build Script not only builds the container, but also creates the docker
|
The build Script not only builds the container, but also creates the docker
|
||||||
volume if not existing yet, or if empty it will just download the models.
|
volume if not existing yet.
|
||||||
|
|
||||||
#### Run the Container
|
#### Run the Container
|
||||||
|
|
||||||
After the build process is done, you can run the container via the provided
|
After the build process is done, you can run the container via the provided
|
||||||
`docker-build/run.sh` script
|
`docker/run.sh` script
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./docker-build/run.sh
|
./docker/run.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
When used without arguments, the container will start the webserver and provide
|
When used without arguments, the container will start the webserver and provide
|
||||||
@ -119,7 +117,7 @@ also do so.
|
|||||||
!!! example "run script example"
|
!!! example "run script example"
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./docker-build/run.sh "banana sushi" -Ak_lms -S42 -s10
|
./docker/run.sh "banana sushi" -Ak_lms -S42 -s10
|
||||||
```
|
```
|
||||||
|
|
||||||
This would generate the legendary "banana sushi" with Seed 42, k_lms Sampler and 10 steps.
|
This would generate the legendary "banana sushi" with Seed 42, k_lms Sampler and 10 steps.
|
||||||
@ -130,16 +128,18 @@ also do so.
|
|||||||
|
|
||||||
## Running the container on your GPU
|
## Running the container on your GPU
|
||||||
|
|
||||||
If you have an Nvidia GPU, you can enable InvokeAI to run on the GPU by running the container with an extra
|
If you have an Nvidia GPU, you can enable InvokeAI to run on the GPU by running
|
||||||
environment variable to enable GPU usage and have the process run much faster:
|
the container with an extra environment variable to enable GPU usage and have
|
||||||
|
the process run much faster:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
GPU_FLAGS=all ./docker-build/run.sh
|
GPU_FLAGS=all ./docker/run.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
This passes the `--gpus all` to docker and uses the GPU.
|
This passes the `--gpus all` to docker and uses the GPU.
|
||||||
|
|
||||||
If you don't have a GPU (or your host is not yet setup to use it) you will see a message like this:
|
If you don't have a GPU (or your host is not yet setup to use it) you will see a
|
||||||
|
message like this:
|
||||||
|
|
||||||
`docker: Error response from daemon: could not select device driver "" with capabilities: [[gpu]].`
|
`docker: Error response from daemon: could not select device driver "" with capabilities: [[gpu]].`
|
||||||
|
|
||||||
@ -147,84 +147,8 @@ You can use the full set of GPU combinations documented here:
|
|||||||
|
|
||||||
https://docs.docker.com/config/containers/resource_constraints/#gpu
|
https://docs.docker.com/config/containers/resource_constraints/#gpu
|
||||||
|
|
||||||
For example, use `GPU_FLAGS=device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a` to choose a specific device identified by a UUID.
|
For example, use `GPU_FLAGS=device=GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a` to
|
||||||
|
choose a specific device identified by a UUID.
|
||||||
## Running InvokeAI in the cloud with Docker
|
|
||||||
|
|
||||||
We offer an optimized Ubuntu-based image that has been well-tested in cloud deployments. Note: it also works well locally on Linux x86_64 systems with an Nvidia GPU. It *may* also work on Windows under WSL2 and on Intel Mac (not tested).
|
|
||||||
|
|
||||||
An advantage of this method is that it does not need any local setup or additional dependencies.
|
|
||||||
|
|
||||||
See the `docker-build/Dockerfile.cloud` file to familiarize yourself with the image's content.
|
|
||||||
|
|
||||||
### Prerequisites
|
|
||||||
|
|
||||||
- a `docker` runtime
|
|
||||||
- `make` (optional but helps for convenience)
|
|
||||||
- Huggingface token to download models, or an existing InvokeAI runtime directory from a previous installation
|
|
||||||
|
|
||||||
Neither local Python nor any dependencies are required. If you don't have `make` (part of `build-essentials` on Ubuntu), or do not wish to install it, the commands from the `docker-build/Makefile` are readily adaptable to be executed directly.
|
|
||||||
|
|
||||||
### Building and running the image locally
|
|
||||||
|
|
||||||
1. Clone this repo and `cd docker-build`
|
|
||||||
1. `make build` - this will build the image. (This does *not* require a GPU-capable system).
|
|
||||||
1. _(skip this step if you already have a complete InvokeAI runtime directory)_
|
|
||||||
- `make configure` (This does *not* require a GPU-capable system)
|
|
||||||
- this will create a local cache of models and configs (a.k.a the _runtime dir_)
|
|
||||||
- enter your Huggingface token when prompted
|
|
||||||
1. `make web`
|
|
||||||
1. Open the `http://localhost:9090` URL in your browser, and enjoy the banana sushi!
|
|
||||||
|
|
||||||
To use InvokeAI on the cli, run `make cli`. To open a Bash shell in the container for arbitrary advanced use, `make shell`.
|
|
||||||
|
|
||||||
#### Building and running without `make`
|
|
||||||
|
|
||||||
(Feel free to adapt paths such as `${HOME}/invokeai` to your liking, and modify the CLI arguments as necessary).
|
|
||||||
|
|
||||||
!!! example "Build the image and configure the runtime directory"
|
|
||||||
```Shell
|
|
||||||
cd docker-build
|
|
||||||
|
|
||||||
DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..
|
|
||||||
|
|
||||||
docker run --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/configure_invokeai.py"
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! example "Run the web server"
|
|
||||||
```Shell
|
|
||||||
docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai -p9090:9090 local/invokeai:latest
|
|
||||||
```
|
|
||||||
|
|
||||||
Access the Web UI at http://localhost:9090
|
|
||||||
|
|
||||||
!!! example "Run the InvokeAI interactive CLI"
|
|
||||||
```
|
|
||||||
docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/invoke.py"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Running the image in the cloud
|
|
||||||
|
|
||||||
This image works anywhere you can run a container with a mounted Docker volume. You may either build this image on a cloud instance, or build and push it to your Docker registry. To manually run this on a cloud instance (such as AWS EC2, GCP or Azure VM):
|
|
||||||
|
|
||||||
1. build this image either in the cloud (you'll need to pull the repo), or locally
|
|
||||||
1. `docker tag` it as `your-registry/invokeai` and push to your registry (i.e. Dockerhub)
|
|
||||||
1. `docker pull` it on your cloud instance
|
|
||||||
1. configure the runtime directory as per above example, using `docker run ... configure_invokeai.py` script
|
|
||||||
1. use either one of the `docker run` commands above, substituting the image name for your own image.
|
|
||||||
|
|
||||||
To run this on Runpod, please refer to the following Runpod template: https://www.runpod.io/console/gpu-secure-cloud?template=vm19ukkycf (you need a Runpod subscription). When launching the template, feel free to set the image to pull your own build.
|
|
||||||
|
|
||||||
The template's `README` provides ample detail, but at a high level, the process is as follows:
|
|
||||||
|
|
||||||
1. create a pod using this Docker image
|
|
||||||
1. ensure the pod has an `INVOKEAI_ROOT=<path_to_your_persistent_volume>` environment variable, and that it corresponds to the path to your pod's persistent volume mount
|
|
||||||
1. Run the pod with `sleep infinity` as the Docker command
|
|
||||||
1. Use Runpod basic SSH to connect to the pod, and run `python scripts/configure_invokeai.py` script
|
|
||||||
1. Stop the pod, and change the Docker command to `python scripts/invoke.py --web --host 0.0.0.0`
|
|
||||||
1. Run the pod again, connect to your pod on HTTP port 9090, and enjoy the banana sushi!
|
|
||||||
|
|
||||||
Running on other cloud providers such as Vast.ai will likely work in a similar fashion.
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@ -240,13 +164,12 @@ Running on other cloud providers such as Vast.ai will likely work in a similar f
|
|||||||
If you're on a **Linux container** the `invoke` script is **automatically
|
If you're on a **Linux container** the `invoke` script is **automatically
|
||||||
started** and the output dir set to the Docker volume you created earlier.
|
started** and the output dir set to the Docker volume you created earlier.
|
||||||
|
|
||||||
If you're **directly on macOS follow these startup instructions**.
|
If you're **directly on macOS follow these startup instructions**. With the
|
||||||
With the Conda environment activated (`conda activate ldm`), run the interactive
|
Conda environment activated (`conda activate ldm`), run the interactive
|
||||||
interface that combines the functionality of the original scripts `txt2img` and
|
interface that combines the functionality of the original scripts `txt2img` and
|
||||||
`img2img`:
|
`img2img`: Use the more accurate but VRAM-intensive full precision math because
|
||||||
Use the more accurate but VRAM-intensive full precision math because
|
half-precision requires autocast and won't work. By default the images are saved
|
||||||
half-precision requires autocast and won't work.
|
in `outputs/img-samples/`.
|
||||||
By default the images are saved in `outputs/img-samples/`.
|
|
||||||
|
|
||||||
```Shell
|
```Shell
|
||||||
python3 scripts/invoke.py --full_precision
|
python3 scripts/invoke.py --full_precision
|
||||||
@ -262,9 +185,9 @@ invoke> q
|
|||||||
### Text to Image
|
### Text to Image
|
||||||
|
|
||||||
For quick (but bad) image results test with 5 steps (default 50) and 1 sample
|
For quick (but bad) image results test with 5 steps (default 50) and 1 sample
|
||||||
image. This will let you know that everything is set up correctly.
|
image. This will let you know that everything is set up correctly. Then increase
|
||||||
Then increase steps to 100 or more for good (but slower) results.
|
steps to 100 or more for good (but slower) results. The prompt can be in quotes
|
||||||
The prompt can be in quotes or not.
|
or not.
|
||||||
|
|
||||||
```Shell
|
```Shell
|
||||||
invoke> The hulk fighting with sheldon cooper -s5 -n1
|
invoke> The hulk fighting with sheldon cooper -s5 -n1
|
||||||
@ -277,10 +200,9 @@ You'll need to experiment to see if face restoration is making it better or
|
|||||||
worse for your specific prompt.
|
worse for your specific prompt.
|
||||||
|
|
||||||
If you're on a container the output is set to the Docker volume. You can copy it
|
If you're on a container the output is set to the Docker volume. You can copy it
|
||||||
wherever you want.
|
wherever you want. You can download it from the Docker Desktop app, Volumes,
|
||||||
You can download it from the Docker Desktop app, Volumes, my-vol, data.
|
my-vol, data. Or you can copy it from your Mac terminal. Keep in mind
|
||||||
Or you can copy it from your Mac terminal. Keep in mind `docker cp` can't expand
|
`docker cp` can't expand `*.png` so you'll need to specify the image file name.
|
||||||
`*.png` so you'll need to specify the image file name.
|
|
||||||
|
|
||||||
On your host Mac (you can use the name of any container that mounted the
|
On your host Mac (you can use the name of any container that mounted the
|
||||||
volume):
|
volume):
|
||||||
|
@ -4,249 +4,392 @@ title: Installing Models
|
|||||||
|
|
||||||
# :octicons-paintbrush-16: Installing Models
|
# :octicons-paintbrush-16: Installing Models
|
||||||
|
|
||||||
## Model Weight Files
|
## Checkpoint and Diffusers Models
|
||||||
|
|
||||||
The model weight files ('\*.ckpt') are the Stable Diffusion "secret sauce". They
|
The model checkpoint files ('\*.ckpt') are the Stable Diffusion
|
||||||
are the product of training the AI on millions of captioned images gathered from
|
"secret sauce". They are the product of training the AI on millions of
|
||||||
multiple sources.
|
captioned images gathered from multiple sources.
|
||||||
|
|
||||||
Originally there was only a single Stable Diffusion weights file, which many
|
Originally there was only a single Stable Diffusion weights file,
|
||||||
people named `model.ckpt`. Now there are dozens or more that have been "fine
|
which many people named `model.ckpt`. Now there are dozens or more
|
||||||
tuned" to provide particular styles, genres, or other features. InvokeAI allows
|
that have been fine tuned to provide particular styles, genres, or
|
||||||
you to install and run multiple model weight files and switch between them
|
other features. In addition, there are several new formats that
|
||||||
quickly in the command-line and web interfaces.
|
improve on the original checkpoint format: a `.safetensors` format
|
||||||
|
which prevents malware from masquerading as a model, and `diffusers`
|
||||||
|
models, the most recent innovation.
|
||||||
|
|
||||||
This manual will guide you through installing and configuring model weight
|
InvokeAI supports all three formats but strongly prefers the
|
||||||
files.
|
`diffusers` format. These are distributed as directories containing
|
||||||
|
multiple subfolders, each of which contains a different aspect of the
|
||||||
|
model. The advantage of this is that the models load from disk really
|
||||||
|
fast. Another advantage is that `diffusers` models are supported by a
|
||||||
|
large and active set of open source developers working at and with
|
||||||
|
the HuggingFace organization, and improvements in both rendering quality
|
||||||
|
and performance are being made at a rapid pace. Among other features
|
||||||
|
is the ability to download and install a `diffusers` model just by
|
||||||
|
providing its HuggingFace repository ID.
|
||||||
|
|
||||||
|
While InvokeAI will continue to support `.ckpt` and `.safetensors`
|
||||||
|
models for the near future, these are deprecated and support will
|
||||||
|
likely be withdrawn at some point in the not-too-distant future.
|
||||||
|
|
||||||
|
This manual will guide you through installing and configuring model
|
||||||
|
weight files and converting legacy `.ckpt` and `.safetensors` files
|
||||||
|
into performant `diffusers` models.
|
||||||
|
|
||||||
## Base Models
|
## Base Models
|
||||||
|
|
||||||
InvokeAI comes with support for a good initial set of models listed in the model
|
InvokeAI comes with support for a good set of starter models. You'll
|
||||||
configuration file `configs/models.yaml`. They are:
|
find them listed in the master models file
|
||||||
|
`configs/INITIAL_MODELS.yaml` in the InvokeAI root directory. The
|
||||||
|
subset that are currently installed are found in
|
||||||
|
`configs/models.yaml`. As of v2.3.1, the list of starter models is:
|
||||||
|
|
||||||
| Model | Weight File | Description | DOWNLOAD FROM |
|
|Model Name | HuggingFace Repo ID | Description | URL |
|
||||||
| -------------------- | --------------------------------- | ---------------------------------------------------------- | -------------------------------------------------------------- |
|
|---------- | ---------- | ----------- | --- |
|
||||||
| stable-diffusion-1.5 | v1-5-pruned-emaonly.ckpt | Most recent version of base Stable Diffusion model | https://huggingface.co/runwayml/stable-diffusion-v1-5 |
|
|stable-diffusion-1.5|runwayml/stable-diffusion-v1-5|Stable Diffusion version 1.5 diffusers model (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-v1-5 |
|
||||||
| stable-diffusion-1.4 | sd-v1-4.ckpt | Previous version of base Stable Diffusion model | https://huggingface.co/CompVis/stable-diffusion-v-1-4-original |
|
|sd-inpainting-1.5|runwayml/stable-diffusion-inpainting|RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)|https://huggingface.co/runwayml/stable-diffusion-inpainting |
|
||||||
| inpainting-1.5 | sd-v1-5-inpainting.ckpt | Stable Diffusion 1.5 model specialized for inpainting | https://huggingface.co/runwayml/stable-diffusion-inpainting |
|
|stable-diffusion-2.1|stabilityai/stable-diffusion-2-1|Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-1 |
|
||||||
| waifu-diffusion-1.3 | model-epoch09-float32.ckpt | Stable Diffusion 1.4 trained to produce anime images | https://huggingface.co/hakurei/waifu-diffusion-v1-3 |
|
|sd-inpainting-2.0|stabilityai/stable-diffusion-2-inpainting|Stable Diffusion version 2.0 inpainting model (5.21 GB)|https://huggingface.co/stabilityai/stable-diffusion-2-inpainting |
|
||||||
| `<all models>` | vae-ft-mse-840000-ema-pruned.ckpt | A fine-tune file add-on file that improves face generation | https://huggingface.co/stabilityai/sd-vae-ft-mse-original/ |
|
|analog-diffusion-1.0|wavymulder/Analog-Diffusion|An SD-1.5 model trained on diverse analog photographs (2.13 GB)|https://huggingface.co/wavymulder/Analog-Diffusion |
|
||||||
|
|deliberate-1.0|XpucT/Deliberate|Versatile model that produces detailed images up to 768px (4.27 GB)|https://huggingface.co/XpucT/Deliberate |
|
||||||
|
|d&d-diffusion-1.0|0xJustin/Dungeons-and-Diffusion|Dungeons & Dragons characters (2.13 GB)|https://huggingface.co/0xJustin/Dungeons-and-Diffusion |
|
||||||
|
|dreamlike-photoreal-2.0|dreamlike-art/dreamlike-photoreal-2.0|A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)|https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0 |
|
||||||
|
|inkpunk-1.0|Envvi/Inkpunk-Diffusion|Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)|https://huggingface.co/Envvi/Inkpunk-Diffusion |
|
||||||
|
|openjourney-4.0|prompthero/openjourney|An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)|https://huggingface.co/prompthero/openjourney |
|
||||||
|
|portrait-plus-1.0|wavymulder/portraitplus|An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB)|https://huggingface.co/wavymulder/portraitplus |
|
||||||
|
|seek-art-mega-1.0|coreco/seek.art_MEGA|A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)|https://huggingface.co/coreco/seek.art_MEGA |
|
||||||
|
|trinart-2.0|naclbit/trinart_stable_diffusion_v2|An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)|https://huggingface.co/naclbit/trinart_stable_diffusion_v2 |
|
||||||
|
|waifu-diffusion-1.4|hakurei/waifu-diffusion|An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)|https://huggingface.co/hakurei/waifu-diffusion |
|
||||||
|
|
||||||
Note that these files are covered by an "Ethical AI" license which forbids
|
Note that these files are covered by an "Ethical AI" license which
|
||||||
certain uses. You will need to create an account on the Hugging Face website and
|
forbids certain uses. When you initially download them, you are asked
|
||||||
accept the license terms before you can access the files.
|
to accept the license terms. In addition, some of these models carry
|
||||||
|
additional license terms that limit their use in commercial
|
||||||
The predefined configuration file for InvokeAI (located at
|
applications or on public servers. Be sure to familiarize yourself
|
||||||
`configs/models.yaml`) provides entries for each of these weights files.
|
with the model terms by visiting the URLs in the table above.
|
||||||
`stable-diffusion-1.5` is the default model used, and we strongly recommend that
|
|
||||||
you install this weights file if nothing else.
|
|
||||||
|
|
||||||
## Community-Contributed Models
|
## Community-Contributed Models
|
||||||
|
|
||||||
There are too many to list here and more are being contributed every day.
|
There are too many to list here and more are being contributed every
|
||||||
Hugging Face maintains a
|
day. [HuggingFace](https://huggingface.co/models?library=diffusers)
|
||||||
[fast-growing repository](https://huggingface.co/sd-concepts-library) of
|
is a great resource for diffusers models, and is also the home of a
|
||||||
fine-tune (".bin") models that can be imported into InvokeAI by passing the
|
[fast-growing repository](https://huggingface.co/sd-concepts-library)
|
||||||
`--embedding_path` option to the `invoke.py` command.
|
of embedding (".bin") models that add subjects and/or styles to your
|
||||||
|
images. The latter are automatically installed on the fly when you
|
||||||
|
include the text `<concept-name>` in your prompt. See [Concepts
|
||||||
|
Library](../features/CONCEPTS.md) for more information.
|
||||||
|
|
||||||
[This page](https://rentry.org/sdmodels) hosts a large list of official and
|
Another popular site for community-contributed models is
|
||||||
unofficial Stable Diffusion models and where they can be obtained.
|
[CIVITAI](https://civitai.com). This extensive site currently supports
|
||||||
|
only `.safetensors` and `.ckpt` models, but they can be easily loaded
|
||||||
|
into InvokeAI and/or converted into optimized `diffusers` models. Be
|
||||||
|
aware that CIVITAI hosts many models that generate NSFW content.
|
||||||
|
|
||||||
|
!!! note
|
||||||
|
|
||||||
|
InvokeAI 2.3.x does not support directly importing and
|
||||||
|
running Stable Diffusion version 2 checkpoint models. You may instead
|
||||||
|
convert them into `diffusers` models using the conversion methods
|
||||||
|
described below.
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
There are three ways to install weights files:
|
There are multiple ways to install and manage models:
|
||||||
|
|
||||||
1. During InvokeAI installation, the `invokeai-configure` script can download
|
1. The `invokeai-configure` script which will download and install them for you.
|
||||||
them for you.
|
|
||||||
|
|
||||||
2. You can use the command-line interface (CLI) to import, configure and modify
|
2. The command-line tool (CLI) has commands that allows you to import, configure and modify
|
||||||
new models files.
|
models files.
|
||||||
|
|
||||||
3. You can download the files manually and add the appropriate entries to
|
3. The web interface (WebUI) has a GUI for importing and managing
|
||||||
`models.yaml`.
|
models.
|
||||||
|
|
||||||
### Installation via `invokeai-configure`
|
### Installation via `invokeai-configure`
|
||||||
|
|
||||||
This is the most automatic way. Run `invokeai-configure` from the
|
From the `invoke` launcher, choose option (6) "re-run the configure
|
||||||
console. It will ask you to select which models to download and lead you through
|
script to download new models." This will launch the same script that
|
||||||
the steps of setting up a Hugging Face account if you haven't done so already.
|
prompted you to select models at install time. You can use this to add
|
||||||
|
models that you skipped the first time around. It is all right to
|
||||||
To start, run `invokeai-configure` from within the InvokeAI:
|
specify a model that was previously downloaded; the script will just
|
||||||
directory
|
confirm that the files are complete.
|
||||||
|
|
||||||
!!! example ""
|
|
||||||
|
|
||||||
```text
|
|
||||||
Loading Python libraries...
|
|
||||||
|
|
||||||
** INTRODUCTION **
|
|
||||||
Welcome to InvokeAI. This script will help download the Stable Diffusion weight files
|
|
||||||
and other large models that are needed for text to image generation. At any point you may interrupt
|
|
||||||
this program and resume later.
|
|
||||||
|
|
||||||
** WEIGHT SELECTION **
|
|
||||||
Would you like to download the Stable Diffusion model weights now? [y]
|
|
||||||
|
|
||||||
Choose the weight file(s) you wish to download. Before downloading you
|
|
||||||
will be given the option to view and change your selections.
|
|
||||||
|
|
||||||
[1] stable-diffusion-1.5:
|
|
||||||
The newest Stable Diffusion version 1.5 weight file (4.27 GB) (recommended)
|
|
||||||
Download? [y]
|
|
||||||
[2] inpainting-1.5:
|
|
||||||
RunwayML SD 1.5 model optimized for inpainting (4.27 GB) (recommended)
|
|
||||||
Download? [y]
|
|
||||||
[3] stable-diffusion-1.4:
|
|
||||||
The original Stable Diffusion version 1.4 weight file (4.27 GB)
|
|
||||||
Download? [n] n
|
|
||||||
[4] waifu-diffusion-1.3:
|
|
||||||
Stable Diffusion 1.4 fine tuned on anime-styled images (4.27 GB)
|
|
||||||
Download? [n] y
|
|
||||||
[5] ft-mse-improved-autoencoder-840000:
|
|
||||||
StabilityAI improved autoencoder fine-tuned for human faces (recommended; 335 MB) (recommended)
|
|
||||||
Download? [y] y
|
|
||||||
The following weight files will be downloaded:
|
|
||||||
[1] stable-diffusion-1.5*
|
|
||||||
[2] inpainting-1.5
|
|
||||||
[4] waifu-diffusion-1.3
|
|
||||||
[5] ft-mse-improved-autoencoder-840000
|
|
||||||
*default
|
|
||||||
Ok to download? [y]
|
|
||||||
** LICENSE AGREEMENT FOR WEIGHT FILES **
|
|
||||||
|
|
||||||
1. To download the Stable Diffusion weight files you need to read and accept the
|
|
||||||
CreativeML Responsible AI license. If you have not already done so, please
|
|
||||||
create an account using the "Sign Up" button:
|
|
||||||
|
|
||||||
https://huggingface.co
|
|
||||||
|
|
||||||
You will need to verify your email address as part of the HuggingFace
|
|
||||||
registration process.
|
|
||||||
|
|
||||||
2. After creating the account, login under your account and accept
|
|
||||||
the license terms located here:
|
|
||||||
|
|
||||||
https://huggingface.co/CompVis/stable-diffusion-v-1-4-original
|
|
||||||
|
|
||||||
Press <enter> when you are ready to continue:
|
|
||||||
...
|
|
||||||
```
|
|
||||||
|
|
||||||
When the script is complete, you will find the downloaded weights files in
|
|
||||||
`models/ldm/stable-diffusion-v1` and a matching configuration file in
|
|
||||||
`configs/models.yaml`.
|
|
||||||
|
|
||||||
You can run the script again to add any models you didn't select the first time.
|
|
||||||
Note that as a safety measure the script will _never_ remove a
|
|
||||||
previously-installed weights file. You will have to do this manually.
|
|
||||||
|
|
||||||
### Installation via the CLI
|
### Installation via the CLI
|
||||||
|
|
||||||
You can install a new model, including any of the community-supported ones, via
|
You can install a new model, including any of the community-supported ones, via
|
||||||
the command-line client's `!import_model` command.
|
the command-line client's `!import_model` command.
|
||||||
|
|
||||||
1. First download the desired model weights file and place it under
|
#### Installing individual `.ckpt` and `.safetensors` models
|
||||||
`models/ldm/stable-diffusion-v1/`. You may rename the weights file to
|
|
||||||
something more memorable if you wish. Record the path of the weights file
|
|
||||||
(e.g. `models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt`)
|
|
||||||
|
|
||||||
2. Launch the `invoke.py` CLI with `python scripts/invoke.py`.
|
If the model is already downloaded to your local disk, use
|
||||||
|
`!import_model /path/to/file.ckpt` to load it. For example:
|
||||||
|
|
||||||
3. At the `invoke>` command-line, enter the command
|
```bash
|
||||||
`!import_model <path to model>`. For example:
|
invoke> !import_model C:/Users/fred/Downloads/martians.safetensors
|
||||||
|
```
|
||||||
|
|
||||||
`invoke> !import_model models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt`
|
!!! tip "Forward Slashes"
|
||||||
|
On Windows systems, use forward slashes rather than backslashes
|
||||||
|
in your file paths.
|
||||||
|
If you do use backslashes,
|
||||||
|
you must double them like this:
|
||||||
|
`C:\\Users\\fred\\Downloads\\martians.safetensors`
|
||||||
|
|
||||||
!!! tip "the CLI supports file path autocompletion"
|
Alternatively you can directly import the file using its URL:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
invoke> !import_model https://example.org/sd_models/martians.safetensors
|
||||||
|
```
|
||||||
|
|
||||||
|
For this to work, the URL must not be password-protected. Otherwise
|
||||||
|
you will receive a 404 error.
|
||||||
|
|
||||||
|
When you import a legacy model, the CLI will first ask you what type
|
||||||
|
of model this is. You can indicate whether it is a model based on
|
||||||
|
Stable Diffusion 1.x (1.4 or 1.5), one based on Stable Diffusion 2.x,
|
||||||
|
or a 1.x inpainting model. Be careful to indicate the correct model
|
||||||
|
type, or it will not load correctly. You can correct the model type
|
||||||
|
after the fact using the `!edit_model` command.
|
||||||
|
|
||||||
|
The system will then ask you a few other questions about the model,
|
||||||
|
including what size image it was trained on (usually 512x512), what
|
||||||
|
name and description you wish to use for it, and whether you would
|
||||||
|
like to install a custom VAE (variable autoencoder) file for the
|
||||||
|
model. For recent models, the answer to the VAE question is usually
|
||||||
|
"no," but it won't hurt to answer "yes".
|
||||||
|
|
||||||
|
After importing, the model will load. If this is successful, you will
|
||||||
|
be asked if you want to keep the model loaded in memory to start
|
||||||
|
generating immediately. You'll also be asked if you wish to make this
|
||||||
|
the default model on startup. You can change this later using
|
||||||
|
`!edit_model`.
|
||||||
|
|
||||||
|
#### Importing a batch of `.ckpt` and `.safetensors` models from a directory
|
||||||
|
|
||||||
|
You may also point `!import_model` to a directory containing a set of
|
||||||
|
`.ckpt` or `.safetensors` files. They will be imported _en masse_.
|
||||||
|
|
||||||
|
!!! example
|
||||||
|
|
||||||
|
```console
|
||||||
|
invoke> !import_model C:/Users/fred/Downloads/civitai_models/
|
||||||
|
```
|
||||||
|
|
||||||
|
You will be given the option to import all models found in the
|
||||||
|
directory, or select which ones to import. If there are subfolders
|
||||||
|
within the directory, they will be searched for models to import.
|
||||||
|
|
||||||
|
#### Installing `diffusers` models
|
||||||
|
|
||||||
|
You can install a `diffusers` model from the HuggingFace site using
|
||||||
|
`!import_model` and the HuggingFace repo_id for the model:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
invoke> !import_model andite/anything-v4.0
|
||||||
|
```
|
||||||
|
|
||||||
|
Alternatively, you can download the model to disk and import it from
|
||||||
|
there. The model may be distributed as a ZIP file, or as a Git
|
||||||
|
repository:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
invoke> !import_model C:/Users/fred/Downloads/andite--anything-v4.0
|
||||||
|
```
|
||||||
|
|
||||||
|
!!! tip "The CLI supports file path autocompletion"
|
||||||
Type a bit of the path name and hit ++tab++ in order to get a choice of
|
Type a bit of the path name and hit ++tab++ in order to get a choice of
|
||||||
possible completions.
|
possible completions.
|
||||||
|
|
||||||
!!! tip "on Windows, you can drag model files onto the command-line"
|
!!! tip "On Windows, you can drag model files onto the command-line"
|
||||||
|
Once you have typed in `!import_model `, you can drag the
|
||||||
|
model file or directory onto the command-line to insert the model path. This way, you don't need to
|
||||||
|
type it or copy/paste. However, you will need to reverse or
|
||||||
|
double backslashes as noted above.
|
||||||
|
|
||||||
Once you have typed in `!import_model `, you can drag the model `.ckpt` file
|
Before installing, the CLI will ask you for a short name and
|
||||||
onto the command-line to insert the model path. This way, you don't need to
|
description for the model, whether to make this the default model that
|
||||||
type it or copy/paste.
|
is loaded at InvokeAI startup time, and whether to replace its
|
||||||
|
VAE. Generally the answer to the latter question is "no".
|
||||||
|
|
||||||
4. Follow the wizard's instructions to complete installation as shown in the
|
### Converting legacy models into `diffusers`
|
||||||
example here:
|
|
||||||
|
|
||||||
!!! example ""
|
The CLI `!convert_model` will convert a `.safetensors` or `.ckpt`
|
||||||
|
model file into `diffusers` and install it. This will enable the model
|
||||||
|
to load and run faster without loss of image quality.
|
||||||
|
|
||||||
```text
|
The usage is identical to `!import_model`. You may point the command
|
||||||
invoke> !import_model models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
|
to either a downloaded model file on disk, or to a (non-password
|
||||||
>> Model import in process. Please enter the values needed to configure this model:
|
protected) URL:
|
||||||
|
|
||||||
Name for this model: arabian-nights
|
```bash
|
||||||
Description of this model: Arabian Nights Fine Tune v1.0
|
invoke> !convert_model C:/Users/fred/Downloads/martians.safetensors
|
||||||
Configuration file for this model: configs/stable-diffusion/v1-inference.yaml
|
|
||||||
Default image width: 512
|
|
||||||
Default image height: 512
|
|
||||||
>> New configuration:
|
|
||||||
arabian-nights:
|
|
||||||
config: configs/stable-diffusion/v1-inference.yaml
|
|
||||||
description: Arabian Nights Fine Tune v1.0
|
|
||||||
height: 512
|
|
||||||
weights: models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
|
|
||||||
width: 512
|
|
||||||
OK to import [n]? y
|
|
||||||
>> Caching model stable-diffusion-1.4 in system RAM
|
|
||||||
>> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
|
|
||||||
| LatentDiffusion: Running in eps-prediction mode
|
|
||||||
| DiffusionWrapper has 859.52 M params.
|
|
||||||
| Making attention of type 'vanilla' with 512 in_channels
|
|
||||||
| Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
|
|
||||||
| Making attention of type 'vanilla' with 512 in_channels
|
|
||||||
| Using faster float16 precision
|
|
||||||
```
|
```
|
||||||
|
|
||||||
If you've previously installed the fine-tune VAE file
|
After a successful conversion, the CLI will offer you the option of
|
||||||
`vae-ft-mse-840000-ema-pruned.ckpt`, the wizard will also ask you if you want to
|
deleting the original `.ckpt` or `.safetensors` file.
|
||||||
add this VAE to the model.
|
|
||||||
|
|
||||||
The appropriate entry for this model will be added to `configs/models.yaml` and
|
### Optimizing a previously-installed model
|
||||||
it will be available to use in the CLI immediately.
|
|
||||||
|
|
||||||
The CLI has additional commands for switching among, viewing, editing, deleting
|
Lastly, if you have previously installed a `.ckpt` or `.safetensors`
|
||||||
the available models. These are described in
|
file and wish to convert it into a `diffusers` model, you can do this
|
||||||
[Command Line Client](../features/CLI.md#model-selection-and-importation), but
|
without re-downloading and converting the original file using the
|
||||||
the two most frequently-used are `!models` and `!switch <name of model>`. The
|
`!optimize_model` command. Simply pass the short name of an existing
|
||||||
first prints a table of models that InvokeAI knows about and their load status.
|
installed model:
|
||||||
The second will load the requested model and lets you switch back and forth
|
|
||||||
quickly among loaded models.
|
```bash
|
||||||
|
invoke> !optimize_model martians-v1.0
|
||||||
|
```
|
||||||
|
|
||||||
|
The model will be converted into `diffusers` format and replace the
|
||||||
|
previously installed version. You will again be offered the
|
||||||
|
opportunity to delete the original `.ckpt` or `.safetensors` file.
|
||||||
|
|
||||||
|
### Related CLI Commands
|
||||||
|
|
||||||
|
There are a whole series of additional model management commands in
|
||||||
|
the CLI that you can read about in [Command-Line
|
||||||
|
Interface](../features/CLI.md). These include:
|
||||||
|
|
||||||
|
* `!models` - List all installed models
|
||||||
|
* `!switch <model name>` - Switch to the indicated model
|
||||||
|
* `!edit_model <model name>` - Edit the indicated model to change its name, description or other properties
|
||||||
|
* `!del_model <model name>` - Delete the indicated model
|
||||||
|
|
||||||
|
### Manually editing `configs/models.yaml`
|
||||||
|
|
||||||
### Manually editing of `configs/models.yaml`
|
|
||||||
|
|
||||||
If you are comfortable with a text editor then you may simply edit `models.yaml`
|
If you are comfortable with a text editor then you may simply edit `models.yaml`
|
||||||
directly.
|
directly.
|
||||||
|
|
||||||
First you need to download the desired .ckpt file and place it in
|
You will need to download the desired `.ckpt/.safetensors` file and
|
||||||
`models/ldm/stable-diffusion-v1` as described in step #1 in the previous
|
place it somewhere on your machine's filesystem. Alternatively, for a
|
||||||
section. Record the path to the weights file, e.g.
|
`diffusers` model, record the repo_id or download the whole model
|
||||||
`models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt`
|
directory. Then using a **text** editor (e.g. the Windows Notepad
|
||||||
|
application), open the file `configs/models.yaml`, and add a new
|
||||||
|
stanza that follows this model:
|
||||||
|
|
||||||
Then using a **text** editor (e.g. the Windows Notepad application), open the
|
#### A legacy model
|
||||||
file `configs/models.yaml`, and add a new stanza that follows this model:
|
|
||||||
|
A legacy `.ckpt` or `.safetensors` entry will look like this:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
arabian-nights-1.0:
|
arabian-nights-1.0:
|
||||||
description: A great fine-tune in Arabian Nights style
|
description: A great fine-tune in Arabian Nights style
|
||||||
weights: ./models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
|
weights: ./path/to/arabian-nights-1.0.ckpt
|
||||||
config: ./configs/stable-diffusion/v1-inference.yaml
|
config: ./configs/stable-diffusion/v1-inference.yaml
|
||||||
|
format: ckpt
|
||||||
width: 512
|
width: 512
|
||||||
height: 512
|
height: 512
|
||||||
vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
|
|
||||||
default: false
|
default: false
|
||||||
```
|
```
|
||||||
|
|
||||||
| name | description |
|
Note that `format` is `ckpt` for both `.ckpt` and `.safetensors` files.
|
||||||
| :----------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| arabian-nights-1.0 | This is the name of the model that you will refer to from within the CLI and the WebGUI when you need to load and use the model. |
|
|
||||||
| description | Any description that you want to add to the model to remind you what it is. |
|
|
||||||
| weights | Relative path to the .ckpt weights file for this model. |
|
|
||||||
| config | This is the confusingly-named configuration file for the model itself. Use `./configs/stable-diffusion/v1-inference.yaml` unless the model happens to need a custom configuration, in which case the place you downloaded it from will tell you what to use instead. For example, the runwayML custom inpainting model requires the file `configs/stable-diffusion/v1-inpainting-inference.yaml`. This is already inclued in the InvokeAI distribution and is configured automatically for you by the `invokeai-configure` script. |
|
|
||||||
| vae | If you want to add a VAE file to the model, then enter its path here. |
|
|
||||||
| width, height | This is the width and height of the images used to train the model. Currently they are always 512 and 512. |
|
|
||||||
|
|
||||||
Save the `models.yaml` and relaunch InvokeAI. The new model should now be
|
#### A diffusers model
|
||||||
available for your use.
|
|
||||||
|
A stanza for a `diffusers` model will look like this for a HuggingFace
|
||||||
|
model with a repository ID:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
arabian-nights-1.1:
|
||||||
|
description: An even better fine-tune of the Arabian Nights
|
||||||
|
repo_id: captahab/arabian-nights-1.1
|
||||||
|
format: diffusers
|
||||||
|
default: true
|
||||||
|
```
|
||||||
|
|
||||||
|
And for a downloaded directory:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
arabian-nights-1.1:
|
||||||
|
description: An even better fine-tune of the Arabian Nights
|
||||||
|
path: /path/to/captahab-arabian-nights-1.1
|
||||||
|
format: diffusers
|
||||||
|
default: true
|
||||||
|
```
|
||||||
|
|
||||||
|
There is additional syntax for indicating an external VAE to use with
|
||||||
|
this model. See `INITIAL_MODELS.yaml` and `models.yaml` for examples.
|
||||||
|
|
||||||
|
After you save the modified `models.yaml` file relaunch
|
||||||
|
`invokeai`. The new model will now be available for your use.
|
||||||
|
|
||||||
|
### Installation via the WebUI
|
||||||
|
|
||||||
|
To access the WebUI Model Manager, click on the button that looks like
|
||||||
|
a cube in the upper right side of the browser screen. This will bring
|
||||||
|
up a dialogue that lists the models you have already installed, and
|
||||||
|
allows you to load, delete or edit them:
|
||||||
|
|
||||||
|
<figure markdown>
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
</figure>
|
||||||
|
|
||||||
|
To add a new model, click on **+ Add New** and select to either a
|
||||||
|
checkpoint/safetensors model, or a diffusers model:
|
||||||
|
|
||||||
|
<figure markdown>
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
</figure>
|
||||||
|
|
||||||
|
In this example, we chose **Add Diffusers**. As shown in the figure
|
||||||
|
below, a new dialogue prompts you to enter the name to use for the
|
||||||
|
model, its description, and either the location of the `diffusers`
|
||||||
|
model on disk, or its Repo ID on the HuggingFace web site. If you
|
||||||
|
choose to enter a path to disk, the system will autocomplete for you
|
||||||
|
as you type:
|
||||||
|
|
||||||
|
<figure markdown>
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
</figure>
|
||||||
|
|
||||||
|
Press **Add Model** at the bottom of the dialogue (scrolled out of
|
||||||
|
sight in the figure), and the model will be downloaded, imported, and
|
||||||
|
registered in `models.yaml`.
|
||||||
|
|
||||||
|
The **Add Checkpoint/Safetensor Model** option is similar, except that
|
||||||
|
in this case you can choose to scan an entire folder for
|
||||||
|
checkpoint/safetensors files to import. Simply type in the path of the
|
||||||
|
directory and press the "Search" icon. This will display the
|
||||||
|
`.ckpt` and `.safetensors` found inside the directory and its
|
||||||
|
subfolders, and allow you to choose which ones to import:
|
||||||
|
|
||||||
|
<figure markdown>
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
</figure>
|
||||||
|
|
||||||
|
## Model Management Startup Options
|
||||||
|
|
||||||
|
The `invoke` launcher and the `invokeai` script accept a series of
|
||||||
|
command-line arguments that modify InvokeAI's behavior when loading
|
||||||
|
models. These can be provided on the command line, or added to the
|
||||||
|
InvokeAI root directory's `invokeai.init` initialization file.
|
||||||
|
|
||||||
|
The arguments are:
|
||||||
|
|
||||||
|
* `--model <model name>` -- Start up with the indicated model loaded
|
||||||
|
* `--ckpt_convert` -- When a checkpoint/safetensors model is loaded, convert it into a `diffusers` model in memory. This does not permanently save the converted model to disk.
|
||||||
|
* `--autoconvert <path/to/directory>` -- Scan the indicated directory path for new checkpoint/safetensors files, convert them into `diffusers` models, and import them into InvokeAI.
|
||||||
|
|
||||||
|
Here is an example of providing an argument on the command line using
|
||||||
|
the `invoke.sh` launch script:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
invoke.sh --autoconvert /home/fred/stable-diffusion-checkpoints
|
||||||
|
```
|
||||||
|
|
||||||
|
And here is what the same argument looks like in `invokeai.init`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
--outdir="/home/fred/invokeai/outputs"
|
||||||
|
--no-nsfw_checker
|
||||||
|
--autoconvert /home/fred/stable-diffusion-checkpoints
|
||||||
|
```
|
||||||
|
@ -24,7 +24,7 @@ You need to have opencv installed so that pypatchmatch can be built:
|
|||||||
brew install opencv
|
brew install opencv
|
||||||
```
|
```
|
||||||
|
|
||||||
The next time you start `invoke`, after sucesfully installing opencv, pypatchmatch will be built.
|
The next time you start `invoke`, after successfully installing opencv, pypatchmatch will be built.
|
||||||
|
|
||||||
## Linux
|
## Linux
|
||||||
|
|
||||||
@ -56,7 +56,7 @@ Prior to installing PyPatchMatch, you need to take the following steps:
|
|||||||
|
|
||||||
5. Confirm that pypatchmatch is installed. At the command-line prompt enter
|
5. Confirm that pypatchmatch is installed. At the command-line prompt enter
|
||||||
`python`, and then at the `>>>` line type
|
`python`, and then at the `>>>` line type
|
||||||
`from patchmatch import patch_match`: It should look like the follwing:
|
`from patchmatch import patch_match`: It should look like the following:
|
||||||
|
|
||||||
```py
|
```py
|
||||||
Python 3.9.5 (default, Nov 23 2021, 15:27:38)
|
Python 3.9.5 (default, Nov 23 2021, 15:27:38)
|
||||||
@ -108,4 +108,4 @@ Prior to installing PyPatchMatch, you need to take the following steps:
|
|||||||
|
|
||||||
[**Next, Follow Steps 4-6 from the Debian Section above**](#linux)
|
[**Next, Follow Steps 4-6 from the Debian Section above**](#linux)
|
||||||
|
|
||||||
If you see no errors, then you're ready to go!
|
If you see no errors you're ready to go!
|
||||||
|
@ -1 +0,0 @@
|
|||||||
010_INSTALL_AUTOMATED.md
|
|
@ -1,374 +0,0 @@
|
|||||||
---
|
|
||||||
title: Manual Installation
|
|
||||||
---
|
|
||||||
|
|
||||||
<figure markdown>
|
|
||||||
# :fontawesome-brands-linux: Linux | :fontawesome-brands-apple: macOS | :fontawesome-brands-windows: Windows
|
|
||||||
</figure>
|
|
||||||
|
|
||||||
!!! warning "This is for advanced Users"
|
|
||||||
|
|
||||||
who are already experienced with using conda or pip
|
|
||||||
|
|
||||||
## Introduction
|
|
||||||
|
|
||||||
You have two choices for manual installation, the [first one](#Conda_method)
|
|
||||||
based on the Anaconda3 package manager (`conda`), and
|
|
||||||
[a second one](#PIP_method) which uses basic Python virtual environment (`venv`)
|
|
||||||
commands and the PIP package manager. Both methods require you to enter commands
|
|
||||||
on the terminal, also known as the "console".
|
|
||||||
|
|
||||||
On Windows systems you are encouraged to install and use the
|
|
||||||
[Powershell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
|
|
||||||
which provides compatibility with Linux and Mac shells and nice features such as
|
|
||||||
command-line completion.
|
|
||||||
|
|
||||||
### Conda method
|
|
||||||
|
|
||||||
1. Check that your system meets the
|
|
||||||
[hardware requirements](index.md#Hardware_Requirements) and has the
|
|
||||||
appropriate GPU drivers installed. In particular, if you are a Linux user
|
|
||||||
with an AMD GPU installed, you may need to install the
|
|
||||||
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
|
|
||||||
|
|
||||||
InvokeAI does not yet support Windows machines with AMD GPUs due to the lack
|
|
||||||
of ROCm driver support on this platform.
|
|
||||||
|
|
||||||
To confirm that the appropriate drivers are installed, run `nvidia-smi` on
|
|
||||||
NVIDIA/CUDA systems, and `rocm-smi` on AMD systems. These should return
|
|
||||||
information about the installed video card.
|
|
||||||
|
|
||||||
Macintosh users with MPS acceleration, or anybody with a CPU-only system,
|
|
||||||
can skip this step.
|
|
||||||
|
|
||||||
2. You will need to install Anaconda3 and Git if they are not already
|
|
||||||
available. Use your operating system's preferred package manager, or
|
|
||||||
download the installers manually. You can find them here:
|
|
||||||
|
|
||||||
- [Anaconda3](https://www.anaconda.com/)
|
|
||||||
- [git](https://git-scm.com/downloads)
|
|
||||||
|
|
||||||
3. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from
|
|
||||||
GitHub:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
git clone https://github.com/invoke-ai/InvokeAI.git
|
|
||||||
```
|
|
||||||
|
|
||||||
This will create InvokeAI folder where you will follow the rest of the
|
|
||||||
steps.
|
|
||||||
|
|
||||||
4. Enter the newly-created InvokeAI folder:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
cd InvokeAI
|
|
||||||
```
|
|
||||||
|
|
||||||
From this step forward make sure that you are working in the InvokeAI
|
|
||||||
directory!
|
|
||||||
|
|
||||||
5. Select the appropriate environment file:
|
|
||||||
|
|
||||||
We have created a series of environment files suited for different operating
|
|
||||||
systems and GPU hardware. They are located in the
|
|
||||||
`environments-and-requirements` directory:
|
|
||||||
|
|
||||||
<figure markdown>
|
|
||||||
|
|
||||||
| filename | OS |
|
|
||||||
| :----------------------: | :----------------------------: |
|
|
||||||
| environment-lin-amd.yml | Linux with an AMD (ROCm) GPU |
|
|
||||||
| environment-lin-cuda.yml | Linux with an NVIDIA CUDA GPU |
|
|
||||||
| environment-mac.yml | Macintosh |
|
|
||||||
| environment-win-cuda.yml | Windows with an NVIDIA CUDA GPU |
|
|
||||||
|
|
||||||
</figure>
|
|
||||||
|
|
||||||
Choose the appropriate environment file for your system and link or copy it
|
|
||||||
to `environment.yml` in InvokeAI's top-level directory. To do so, run
|
|
||||||
following command from the repository-root:
|
|
||||||
|
|
||||||
!!! Example ""
|
|
||||||
|
|
||||||
=== "Macintosh and Linux"
|
|
||||||
|
|
||||||
!!! todo "Replace `xxx` and `yyy` with the appropriate OS and GPU codes as seen in the table above"
|
|
||||||
|
|
||||||
```bash
|
|
||||||
ln -sf environments-and-requirements/environment-xxx-yyy.yml environment.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
When this is done, confirm that a file `environment.yml` has been linked in
|
|
||||||
the InvokeAI root directory and that it points to the correct file in the
|
|
||||||
`environments-and-requirements`.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
ls -la
|
|
||||||
```
|
|
||||||
|
|
||||||
=== "Windows"
|
|
||||||
|
|
||||||
!!! todo " Since it requires admin privileges to create links, we will use the copy command to create your `environment.yml`"
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
copy environments-and-requirements\environment-win-cuda.yml environment.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
Afterwards verify that the file `environment.yml` has been created, either via the
|
|
||||||
explorer or by using the command `dir` from the terminal
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
dir
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! warning "Do not try to run conda directly on the subdirectory environments file. This won't work. Instead, copy or link it to the top-level directory as shown."
|
|
||||||
|
|
||||||
6. Create the conda environment:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
conda env update
|
|
||||||
```
|
|
||||||
|
|
||||||
This will create a new environment named `invokeai` and install all InvokeAI
|
|
||||||
dependencies into it. If something goes wrong you should take a look at
|
|
||||||
[troubleshooting](#troubleshooting).
|
|
||||||
|
|
||||||
7. Activate the `invokeai` environment:
|
|
||||||
|
|
||||||
In order to use the newly created environment you will first need to
|
|
||||||
activate it
|
|
||||||
|
|
||||||
```bash
|
|
||||||
conda activate invokeai
|
|
||||||
```
|
|
||||||
|
|
||||||
Your command-line prompt should change to indicate that `invokeai` is active
|
|
||||||
by prepending `(invokeai)`.
|
|
||||||
|
|
||||||
8. Pre-Load the model weights files:
|
|
||||||
|
|
||||||
!!! tip
|
|
||||||
|
|
||||||
If you have already downloaded the weights file(s) for another Stable
|
|
||||||
Diffusion distribution, you may skip this step (by selecting "skip" when
|
|
||||||
prompted) and configure InvokeAI to use the previously-downloaded files. The
|
|
||||||
process for this is described in [here](050_INSTALLING_MODELS.md).
|
|
||||||
|
|
||||||
```bash
|
|
||||||
python scripts/configure_invokeai.py
|
|
||||||
```
|
|
||||||
|
|
||||||
The script `configure_invokeai.py` will interactively guide you through the
|
|
||||||
process of downloading and installing the weights files needed for InvokeAI.
|
|
||||||
Note that the main Stable Diffusion weights file is protected by a license
|
|
||||||
agreement that you have to agree to. The script will list the steps you need
|
|
||||||
to take to create an account on the site that hosts the weights files,
|
|
||||||
accept the agreement, and provide an access token that allows InvokeAI to
|
|
||||||
legally download and install the weights files.
|
|
||||||
|
|
||||||
If you get an error message about a module not being installed, check that
|
|
||||||
the `invokeai` environment is active and if not, repeat step 7.
|
|
||||||
|
|
||||||
9. Run the command-line- or the web- interface:
|
|
||||||
|
|
||||||
!!! example ""
|
|
||||||
|
|
||||||
!!! warning "Make sure that the conda environment is activated, which should create `(invokeai)` in front of your prompt!"
|
|
||||||
|
|
||||||
=== "CLI"
|
|
||||||
|
|
||||||
```bash
|
|
||||||
python scripts/invoke.py
|
|
||||||
```
|
|
||||||
|
|
||||||
=== "local Webserver"
|
|
||||||
|
|
||||||
```bash
|
|
||||||
python scripts/invoke.py --web
|
|
||||||
```
|
|
||||||
|
|
||||||
=== "Public Webserver"
|
|
||||||
|
|
||||||
```bash
|
|
||||||
python scripts/invoke.py --web --host 0.0.0.0
|
|
||||||
```
|
|
||||||
|
|
||||||
If you choose to run the web interface, point your browser at
|
|
||||||
http://localhost:9090 in order to load the GUI.
|
|
||||||
|
|
||||||
10. Render away!
|
|
||||||
|
|
||||||
Browse the [features](../features/CLI.md) section to learn about all the things you
|
|
||||||
can do with InvokeAI.
|
|
||||||
|
|
||||||
Note that some GPUs are slow to warm up. In particular, when using an AMD
|
|
||||||
card with the ROCm driver, you may have to wait for over a minute the first
|
|
||||||
time you try to generate an image. Fortunately, after the warm up period
|
|
||||||
rendering will be fast.
|
|
||||||
|
|
||||||
11. Subsequently, to relaunch the script, be sure to run "conda activate
|
|
||||||
invokeai", enter the `InvokeAI` directory, and then launch the invoke
|
|
||||||
script. If you forget to activate the 'invokeai' environment, the script
|
|
||||||
will fail with multiple `ModuleNotFound` errors.
|
|
||||||
|
|
||||||
## Updating to newer versions of the script
|
|
||||||
|
|
||||||
This distribution is changing rapidly. If you used the `git clone` method
|
|
||||||
(step 3) to download the InvokeAI directory, then to update to the latest and
|
|
||||||
greatest version, launch the Anaconda window, enter `InvokeAI` and type:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
git pull
|
|
||||||
conda env update
|
|
||||||
python scripts/configure_invokeai.py --no-interactive #optional
|
|
||||||
```
|
|
||||||
|
|
||||||
This will bring your local copy into sync with the remote one. The last step may
|
|
||||||
be needed to take advantage of new features or released models. The
|
|
||||||
`--no-interactive` flag will prevent the script from prompting you to download
|
|
||||||
the big Stable Diffusion weights files.
|
|
||||||
|
|
||||||
## pip Install
|
|
||||||
|
|
||||||
To install InvokeAI with only the PIP package manager, please follow these
|
|
||||||
steps:
|
|
||||||
|
|
||||||
1. Make sure you are using Python 3.9 or higher. The rest of the install
|
|
||||||
procedure depends on this:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
python -V
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Install the `virtualenv` tool if you don't have it already:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
pip install virtualenv
|
|
||||||
```
|
|
||||||
|
|
||||||
3. From within the InvokeAI top-level directory, create and activate a virtual
|
|
||||||
environment named `invokeai`:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
virtualenv invokeai
|
|
||||||
source invokeai/bin/activate
|
|
||||||
```
|
|
||||||
|
|
||||||
4. Run PIP
|
|
||||||
|
|
||||||
```bash
|
|
||||||
pip --python invokeai install --use-pep517 .
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
Here are some common issues and their suggested solutions.
|
|
||||||
|
|
||||||
### Conda
|
|
||||||
|
|
||||||
#### Conda fails before completing `conda update`
|
|
||||||
|
|
||||||
The usual source of these errors is a package incompatibility. While we have
|
|
||||||
tried to minimize these, over time packages get updated and sometimes introduce
|
|
||||||
incompatibilities.
|
|
||||||
|
|
||||||
We suggest that you search
|
|
||||||
[Issues](https://github.com/invoke-ai/InvokeAI/issues) or the "bugs-and-support"
|
|
||||||
channel of the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).
|
|
||||||
|
|
||||||
You may also try to install the broken packages manually using PIP. To do this,
|
|
||||||
activate the `invokeai` environment, and run `pip install` with the name and
|
|
||||||
version of the package that is causing the incompatibility. For example:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
pip install test-tube==0.7.5
|
|
||||||
```
|
|
||||||
|
|
||||||
You can keep doing this until all requirements are satisfied and the `invoke.py`
|
|
||||||
script runs without errors. Please report to
|
|
||||||
[Issues](https://github.com/invoke-ai/InvokeAI/issues) what you were able to do
|
|
||||||
to work around the problem so that others can benefit from your investigation.
|
|
||||||
|
|
||||||
### Create Conda Environment fails on MacOS
|
|
||||||
|
|
||||||
If creating the conda environment fails with an lmdb error, this is most likely caused by Clang.
|
|
||||||
Run brew config to see which Clang is installed on your Mac. If Clang isn't installed, that's causing the error.
|
|
||||||
Start by installing additional XCode command line tools, followed by brew install llvm.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
xcode-select --install
|
|
||||||
brew install llvm
|
|
||||||
```
|
|
||||||
|
|
||||||
If brew config has Clang installed, update to the latest llvm and try creating the environment again.
|
|
||||||
|
|
||||||
#### `configure_invokeai.py` or `invoke.py` crashes at an early stage
|
|
||||||
|
|
||||||
This is usually due to an incomplete or corrupted Conda install. Make sure you
|
|
||||||
have linked to the correct environment file and run `conda update` again.
|
|
||||||
|
|
||||||
If the problem persists, a more extreme measure is to clear Conda's caches and
|
|
||||||
remove the `invokeai` environment:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
conda deactivate
|
|
||||||
conda env remove -n invokeai
|
|
||||||
conda clean -a
|
|
||||||
conda update
|
|
||||||
```
|
|
||||||
|
|
||||||
This removes all cached library files, including ones that may have been
|
|
||||||
corrupted somehow. (This is not supposed to happen, but does anyway).
|
|
||||||
|
|
||||||
#### `invoke.py` crashes at a later stage
|
|
||||||
|
|
||||||
If the CLI or web site had been working ok, but something unexpected happens
|
|
||||||
later on during the session, you've encountered a code bug that is probably
|
|
||||||
unrelated to an install issue. Please search
|
|
||||||
[Issues](https://github.com/invoke-ai/InvokeAI/issues), file a bug report, or
|
|
||||||
ask for help on [Discord](https://discord.gg/ZmtBAhwWhy)
|
|
||||||
|
|
||||||
#### My renders are running very slowly
|
|
||||||
|
|
||||||
You may have installed the wrong torch (machine learning) package, and the
|
|
||||||
system is running on CPU rather than the GPU. To check, look at the log messages
|
|
||||||
that appear when `invoke.py` is first starting up. One of the earlier lines
|
|
||||||
should say `Using device type cuda`. On AMD systems, it will also say "cuda",
|
|
||||||
and on Macintoshes, it should say "mps". If instead the message says it is
|
|
||||||
running on "cpu", then you may need to install the correct torch library.
|
|
||||||
|
|
||||||
You may be able to fix this by installing a different torch library. Here are
|
|
||||||
the magic incantations for Conda and PIP.
|
|
||||||
|
|
||||||
!!! todo "For CUDA systems"
|
|
||||||
|
|
||||||
- conda
|
|
||||||
|
|
||||||
```bash
|
|
||||||
conda install pytorch torchvision torchaudio pytorch-cuda=11.6 -c pytorch -c nvidia
|
|
||||||
```
|
|
||||||
|
|
||||||
- pip
|
|
||||||
|
|
||||||
```bash
|
|
||||||
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
|
|
||||||
```
|
|
||||||
|
|
||||||
!!! todo "For AMD systems"
|
|
||||||
|
|
||||||
- conda
|
|
||||||
|
|
||||||
```bash
|
|
||||||
conda activate invokeai
|
|
||||||
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
|
|
||||||
```
|
|
||||||
|
|
||||||
- pip
|
|
||||||
|
|
||||||
```bash
|
|
||||||
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
|
|
||||||
```
|
|
||||||
|
|
||||||
More information and troubleshooting tips can be found at https://pytorch.org.
|
|
@ -3,7 +3,19 @@ title: Overview
|
|||||||
---
|
---
|
||||||
|
|
||||||
We offer several ways to install InvokeAI, each one suited to your
|
We offer several ways to install InvokeAI, each one suited to your
|
||||||
experience and preferences.
|
experience and preferences. We suggest that everyone start by
|
||||||
|
reviewing the
|
||||||
|
[hardware](010_INSTALL_AUTOMATED.md#hardware_requirements) and
|
||||||
|
[software](010_INSTALL_AUTOMATED.md#software_requirements)
|
||||||
|
requirements, as they are the same across each install method. Then
|
||||||
|
pick the install method most suitable to your level of experience and
|
||||||
|
needs.
|
||||||
|
|
||||||
|
See the [troubleshooting
|
||||||
|
section](010_INSTALL_AUTOMATED.md#troubleshooting) of the automated
|
||||||
|
install guide for frequently-encountered installation issues.
|
||||||
|
|
||||||
|
## Main Application
|
||||||
|
|
||||||
1. [Automated Installer](010_INSTALL_AUTOMATED.md)
|
1. [Automated Installer](010_INSTALL_AUTOMATED.md)
|
||||||
|
|
||||||
@ -33,3 +45,10 @@ experience and preferences.
|
|||||||
InvokeAI and its dependencies. This method is recommended for
|
InvokeAI and its dependencies. This method is recommended for
|
||||||
individuals with experience with Docker containers and understand
|
individuals with experience with Docker containers and understand
|
||||||
the pluses and minuses of a container-based install.
|
the pluses and minuses of a container-based install.
|
||||||
|
|
||||||
|
## Quick Guides
|
||||||
|
|
||||||
|
* [Installing CUDA and ROCm Drivers](./030_INSTALL_CUDA_AND_ROCM.md)
|
||||||
|
* [Installing XFormers](./070_INSTALL_XFORMERS.md)
|
||||||
|
* [Installing PyPatchMatch](./060_INSTALL_PATCHMATCH.md)
|
||||||
|
* [Installing New Models](./050_INSTALLING_MODELS.md)
|
||||||
|
@ -1,73 +0,0 @@
|
|||||||
openapi: 3.0.3
|
|
||||||
info:
|
|
||||||
title: Stable Diffusion
|
|
||||||
description: |-
|
|
||||||
TODO: Description Here
|
|
||||||
|
|
||||||
Some useful links:
|
|
||||||
- [Stable Diffusion Dream Server](https://github.com/lstein/stable-diffusion)
|
|
||||||
|
|
||||||
license:
|
|
||||||
name: MIT License
|
|
||||||
url: https://github.com/lstein/stable-diffusion/blob/main/LICENSE
|
|
||||||
version: 1.0.0
|
|
||||||
servers:
|
|
||||||
- url: http://localhost:9090/api
|
|
||||||
tags:
|
|
||||||
- name: images
|
|
||||||
description: Retrieve and manage generated images
|
|
||||||
paths:
|
|
||||||
/images/{imageId}:
|
|
||||||
get:
|
|
||||||
tags:
|
|
||||||
- images
|
|
||||||
summary: Get image by ID
|
|
||||||
description: Returns a single image
|
|
||||||
operationId: getImageById
|
|
||||||
parameters:
|
|
||||||
- name: imageId
|
|
||||||
in: path
|
|
||||||
description: ID of image to return
|
|
||||||
required: true
|
|
||||||
schema:
|
|
||||||
type: string
|
|
||||||
responses:
|
|
||||||
'200':
|
|
||||||
description: successful operation
|
|
||||||
content:
|
|
||||||
image/png:
|
|
||||||
schema:
|
|
||||||
type: string
|
|
||||||
format: binary
|
|
||||||
'404':
|
|
||||||
description: Image not found
|
|
||||||
/intermediates/{intermediateId}/{step}:
|
|
||||||
get:
|
|
||||||
tags:
|
|
||||||
- images
|
|
||||||
summary: Get intermediate image by ID
|
|
||||||
description: Returns a single intermediate image
|
|
||||||
operationId: getIntermediateById
|
|
||||||
parameters:
|
|
||||||
- name: intermediateId
|
|
||||||
in: path
|
|
||||||
description: ID of intermediate to return
|
|
||||||
required: true
|
|
||||||
schema:
|
|
||||||
type: string
|
|
||||||
- name: step
|
|
||||||
in: path
|
|
||||||
description: The generation step of the intermediate
|
|
||||||
required: true
|
|
||||||
schema:
|
|
||||||
type: string
|
|
||||||
responses:
|
|
||||||
'200':
|
|
||||||
description: successful operation
|
|
||||||
content:
|
|
||||||
image/png:
|
|
||||||
schema:
|
|
||||||
type: string
|
|
||||||
format: binary
|
|
||||||
'404':
|
|
||||||
description: Intermediate not found
|
|
@ -23,9 +23,11 @@ We thank them for all of their time and hard work.
|
|||||||
* @damian0815 - Attention Systems and Gameplay Engineer
|
* @damian0815 - Attention Systems and Gameplay Engineer
|
||||||
* @mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
|
* @mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
|
||||||
* @Netsvetaev (Artur Netsvetaev) - UI/UX Developer
|
* @Netsvetaev (Artur Netsvetaev) - UI/UX Developer
|
||||||
* @tildebyte - general gadfly and resident (self-appointed) know-it-all
|
* @tildebyte - General gadfly and resident (self-appointed) know-it-all
|
||||||
* @keturn - Lead for Diffusers port
|
* @keturn - Lead for Diffusers port
|
||||||
* @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
|
* @ebr (Eugene Brodsky) - Cloud/DevOps/Software engineer; your friendly neighbourhood cluster-autoscaler
|
||||||
|
* @jpphoto (Jonathan Pollack) - Inference and rendering engine optimization
|
||||||
|
* @genomancer (Gregg Helt) - Model training and merging
|
||||||
|
|
||||||
## **Contributions by**
|
## **Contributions by**
|
||||||
|
|
||||||
|
19
docs/other/TRANSLATION.md
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
# Translation
|
||||||
|
|
||||||
|
InvokeAI uses [Weblate](https://weblate.org) for translation. Weblate is a FOSS project providing a scalable translation service. Weblate automates the tedious parts of managing translation of a growing project, and the service is generously provided at no cost to FOSS projects like InvokeAI.
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
If you'd like to contribute by adding or updating a translation, please visit our [Weblate project](https://hosted.weblate.org/engage/invokeai/). You'll need to sign in with your GitHub account (a number of other accounts are supported, including Google).
|
||||||
|
|
||||||
|
Once signed in, select a language and then the Web UI component. From here you can Browse and Translate strings from English to your chosen language. Zen mode offers a simpler translation experience.
|
||||||
|
|
||||||
|
Your changes will be attributed to you in the automated PR process; you don't need to do anything else.
|
||||||
|
|
||||||
|
## Help & Questions
|
||||||
|
|
||||||
|
Please check Weblate's [documentation](https://docs.weblate.org/en/latest/index.html) or ping @psychedelicious or @blessedcoolant on Discord if you have any questions.
|
||||||
|
|
||||||
|
## Thanks
|
||||||
|
|
||||||
|
Thanks to the InvokeAI community for their efforts to translate the project!
|
Before Width: | Height: | Size: 665 B |
Before Width: | Height: | Size: 628 B |
@ -1,16 +0,0 @@
|
|||||||
html {
|
|
||||||
box-sizing: border-box;
|
|
||||||
overflow: -moz-scrollbars-vertical;
|
|
||||||
overflow-y: scroll;
|
|
||||||
}
|
|
||||||
|
|
||||||
*,
|
|
||||||
*:before,
|
|
||||||
*:after {
|
|
||||||
box-sizing: inherit;
|
|
||||||
}
|
|
||||||
|
|
||||||
body {
|
|
||||||
margin: 0;
|
|
||||||
background: #fafafa;
|
|
||||||
}
|
|
@ -1,79 +0,0 @@
|
|||||||
<!doctype html>
|
|
||||||
<html lang="en-US">
|
|
||||||
<head>
|
|
||||||
<title>Swagger UI: OAuth2 Redirect</title>
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
<script>
|
|
||||||
'use strict';
|
|
||||||
function run () {
|
|
||||||
var oauth2 = window.opener.swaggerUIRedirectOauth2;
|
|
||||||
var sentState = oauth2.state;
|
|
||||||
var redirectUrl = oauth2.redirectUrl;
|
|
||||||
var isValid, qp, arr;
|
|
||||||
|
|
||||||
if (/code|token|error/.test(window.location.hash)) {
|
|
||||||
qp = window.location.hash.substring(1).replace('?', '&');
|
|
||||||
} else {
|
|
||||||
qp = location.search.substring(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
arr = qp.split("&");
|
|
||||||
arr.forEach(function (v,i,_arr) { _arr[i] = '"' + v.replace('=', '":"') + '"';});
|
|
||||||
qp = qp ? JSON.parse('{' + arr.join() + '}',
|
|
||||||
function (key, value) {
|
|
||||||
return key === "" ? value : decodeURIComponent(value);
|
|
||||||
}
|
|
||||||
) : {};
|
|
||||||
|
|
||||||
isValid = qp.state === sentState;
|
|
||||||
|
|
||||||
if ((
|
|
||||||
oauth2.auth.schema.get("flow") === "accessCode" ||
|
|
||||||
oauth2.auth.schema.get("flow") === "authorizationCode" ||
|
|
||||||
oauth2.auth.schema.get("flow") === "authorization_code"
|
|
||||||
) && !oauth2.auth.code) {
|
|
||||||
if (!isValid) {
|
|
||||||
oauth2.errCb({
|
|
||||||
authId: oauth2.auth.name,
|
|
||||||
source: "auth",
|
|
||||||
level: "warning",
|
|
||||||
message: "Authorization may be unsafe, passed state was changed in server. The passed state wasn't returned from auth server."
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
if (qp.code) {
|
|
||||||
delete oauth2.state;
|
|
||||||
oauth2.auth.code = qp.code;
|
|
||||||
oauth2.callback({auth: oauth2.auth, redirectUrl: redirectUrl});
|
|
||||||
} else {
|
|
||||||
let oauthErrorMsg;
|
|
||||||
if (qp.error) {
|
|
||||||
oauthErrorMsg = "["+qp.error+"]: " +
|
|
||||||
(qp.error_description ? qp.error_description+ ". " : "no accessCode received from the server. ") +
|
|
||||||
(qp.error_uri ? "More info: "+qp.error_uri : "");
|
|
||||||
}
|
|
||||||
|
|
||||||
oauth2.errCb({
|
|
||||||
authId: oauth2.auth.name,
|
|
||||||
source: "auth",
|
|
||||||
level: "error",
|
|
||||||
message: oauthErrorMsg || "[Authorization failed]: no accessCode received from the server."
|
|
||||||
});
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
oauth2.callback({auth: oauth2.auth, token: qp, isValid: isValid, redirectUrl: redirectUrl});
|
|
||||||
}
|
|
||||||
window.close();
|
|
||||||
}
|
|
||||||
|
|
||||||
if (document.readyState !== 'loading') {
|
|
||||||
run();
|
|
||||||
} else {
|
|
||||||
document.addEventListener('DOMContentLoaded', function () {
|
|
||||||
run();
|
|
||||||
});
|
|
||||||
}
|
|
||||||
</script>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
@ -1,20 +0,0 @@
|
|||||||
window.onload = function() {
|
|
||||||
//<editor-fold desc="Changeable Configuration Block">
|
|
||||||
|
|
||||||
// the following lines will be replaced by docker/configurator, when it runs in a docker-container
|
|
||||||
window.ui = SwaggerUIBundle({
|
|
||||||
url: "openapi3_0.yaml",
|
|
||||||
dom_id: '#swagger-ui',
|
|
||||||
deepLinking: true,
|
|
||||||
presets: [
|
|
||||||
SwaggerUIBundle.presets.apis,
|
|
||||||
SwaggerUIStandalonePreset
|
|
||||||
],
|
|
||||||
plugins: [
|
|
||||||
SwaggerUIBundle.plugins.DownloadUrl
|
|
||||||
],
|
|
||||||
layout: "StandaloneLayout"
|
|
||||||
});
|
|
||||||
|
|
||||||
//</editor-fold>
|
|
||||||
};
|
|
@ -1,50 +0,0 @@
|
|||||||
name: invokeai
|
|
||||||
channels:
|
|
||||||
- pytorch
|
|
||||||
- conda-forge
|
|
||||||
- defaults
|
|
||||||
dependencies:
|
|
||||||
- albumentations=0.4.3
|
|
||||||
- cudatoolkit
|
|
||||||
- einops=0.3.0
|
|
||||||
- eventlet
|
|
||||||
- flask-socketio=5.3.0
|
|
||||||
- flask=2.1.*
|
|
||||||
- flask_cors=3.0.10
|
|
||||||
- imageio-ffmpeg=0.4.2
|
|
||||||
- imageio=2.9.0
|
|
||||||
- kornia=0.6
|
|
||||||
- numpy=1.19
|
|
||||||
- opencv=4.6.0
|
|
||||||
- pillow=8.*
|
|
||||||
- pip>=22.2.2
|
|
||||||
- pudb=2019.2
|
|
||||||
- python=3.9.*
|
|
||||||
- pytorch
|
|
||||||
- pytorch-lightning=1.7.7
|
|
||||||
- send2trash=1.8.0
|
|
||||||
- streamlit
|
|
||||||
- tokenizers>=0.11.1,!=0.11.3,<0.13
|
|
||||||
- torch-fidelity=0.3.0
|
|
||||||
- torchmetrics=0.7.0
|
|
||||||
- torchvision
|
|
||||||
- transformers~=4.25
|
|
||||||
- pip:
|
|
||||||
- accelerate
|
|
||||||
- diffusers[torch]~=0.11
|
|
||||||
- getpass_asterisk
|
|
||||||
- huggingface-hub>=0.11.1
|
|
||||||
- omegaconf==2.1.1
|
|
||||||
- picklescan
|
|
||||||
- pyreadline3
|
|
||||||
- realesrgan
|
|
||||||
- requests==2.25.1
|
|
||||||
- safetensors
|
|
||||||
- taming-transformers-rom1504
|
|
||||||
- test-tube>=0.7.5
|
|
||||||
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
|
||||||
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
|
||||||
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
|
||||||
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
|
|
||||||
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
|
|
||||||
- -e .
|
|
@ -1,51 +0,0 @@
|
|||||||
name: invokeai
|
|
||||||
channels:
|
|
||||||
- pytorch
|
|
||||||
- conda-forge
|
|
||||||
- defaults
|
|
||||||
dependencies:
|
|
||||||
- python=3.9.*
|
|
||||||
- pip=22.2.2
|
|
||||||
- numpy=1.23.3
|
|
||||||
- pip:
|
|
||||||
- --extra-index-url https://download.pytorch.org/whl/rocm5.2/
|
|
||||||
- accelerate
|
|
||||||
- albumentations==0.4.3
|
|
||||||
- diffusers[torch]~=0.11
|
|
||||||
- einops==0.3.0
|
|
||||||
- eventlet
|
|
||||||
- flask==2.1.3
|
|
||||||
- flask_cors==3.0.10
|
|
||||||
- flask_socketio==5.3.0
|
|
||||||
- getpass_asterisk
|
|
||||||
- huggingface-hub>=0.11.1
|
|
||||||
- imageio-ffmpeg==0.4.2
|
|
||||||
- imageio==2.9.0
|
|
||||||
- kornia==0.6.0
|
|
||||||
- omegaconf==2.2.3
|
|
||||||
- opencv-python==4.5.5.64
|
|
||||||
- picklescan
|
|
||||||
- pillow==9.2.0
|
|
||||||
- pudb==2019.2
|
|
||||||
- pyreadline3
|
|
||||||
- pytorch-lightning==1.7.7
|
|
||||||
- realesrgan
|
|
||||||
- requests==2.25.1
|
|
||||||
- safetensors
|
|
||||||
- send2trash==1.8.0
|
|
||||||
- streamlit==1.12.0
|
|
||||||
- taming-transformers-rom1504
|
|
||||||
- test-tube>=0.7.5
|
|
||||||
- tqdm
|
|
||||||
- torch
|
|
||||||
- torch-fidelity==0.3.0
|
|
||||||
- torchaudio
|
|
||||||
- torchmetrics==0.7.0
|
|
||||||
- torchvision
|
|
||||||
- transformers~=4.25
|
|
||||||
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
|
||||||
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
|
|
||||||
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
|
||||||
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
|
|
||||||
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
|
|
||||||
- -e .
|
|
@ -1,50 +0,0 @@
|
|||||||
name: invokeai
|
|
||||||
channels:
|
|
||||||
- pytorch
|
|
||||||
- conda-forge
|
|
||||||
- defaults
|
|
||||||
dependencies:
|
|
||||||
- python=3.9.*
|
|
||||||
- pip=22.2.2
|
|
||||||
- numpy=1.23.3
|
|
||||||
- torchvision=0.13.1
|
|
||||||
- torchaudio=0.12.1
|
|
||||||
- pytorch=1.12.1
|
|
||||||
- cudatoolkit=11.6
|
|
||||||
- pip:
|
|
||||||
- accelerate~=0.13
|
|
||||||
- albumentations==0.4.3
|
|
||||||
- diffusers[torch]~=0.11
|
|
||||||
- einops==0.3.0
|
|
||||||
- eventlet
|
|
||||||
- flask==2.1.3
|
|
||||||
- flask_cors==3.0.10
|
|
||||||
- flask_socketio==5.3.0
|
|
||||||
- getpass_asterisk
|
|
||||||
- huggingface-hub>=0.11.1
|
|
||||||
- imageio-ffmpeg==0.4.2
|
|
||||||
- imageio==2.9.0
|
|
||||||
- kornia==0.6.0
|
|
||||||
- omegaconf==2.2.3
|
|
||||||
- opencv-python==4.5.5.64
|
|
||||||
- picklescan
|
|
||||||
- pillow==9.2.0
|
|
||||||
- pudb==2019.2
|
|
||||||
- pyreadline3
|
|
||||||
- pytorch-lightning==1.7.7
|
|
||||||
- realesrgan
|
|
||||||
- requests==2.25.1
|
|
||||||
- safetensors~=0.2
|
|
||||||
- send2trash==1.8.0
|
|
||||||
- streamlit==1.12.0
|
|
||||||
- taming-transformers-rom1504
|
|
||||||
- test-tube>=0.7.5
|
|
||||||
- torch-fidelity==0.3.0
|
|
||||||
- torchmetrics==0.7.0
|
|
||||||
- transformers~=4.25
|
|
||||||
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
|
||||||
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
|
|
||||||
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
|
||||||
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
|
|
||||||
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
|
|
||||||
- -e .
|
|
@ -1,69 +0,0 @@
|
|||||||
name: invokeai
|
|
||||||
channels:
|
|
||||||
- pytorch
|
|
||||||
- huggingface
|
|
||||||
- conda-forge
|
|
||||||
- defaults
|
|
||||||
dependencies:
|
|
||||||
- python=3.10
|
|
||||||
- pip>=22.2
|
|
||||||
- pytorch=1.12
|
|
||||||
- pytorch-lightning=1.7
|
|
||||||
- torchvision=0.13
|
|
||||||
- torchmetrics=0.10
|
|
||||||
- torch-fidelity=0.3
|
|
||||||
|
|
||||||
# I suggest to keep the other deps sorted for convenience.
|
|
||||||
# To determine what the latest versions should be, run:
|
|
||||||
#
|
|
||||||
# ```shell
|
|
||||||
# sed -E 's/invokeai/invokeai-updated/;20,99s/- ([^=]+)==.+/- \1/' environment-mac.yml > environment-mac-updated.yml
|
|
||||||
# CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac-updated.yml && conda list -n invokeai-updated | awk ' {print " - " $1 "==" $2;} '
|
|
||||||
# ```
|
|
||||||
- accelerate
|
|
||||||
- albumentations=1.2
|
|
||||||
- coloredlogs=15.0
|
|
||||||
- einops=0.3
|
|
||||||
- eventlet
|
|
||||||
- grpcio=1.46
|
|
||||||
- flask=2.1
|
|
||||||
- flask-socketio=5.3
|
|
||||||
- flask-cors=3.0
|
|
||||||
- humanfriendly=10.0
|
|
||||||
- imageio=2.21
|
|
||||||
- imageio-ffmpeg=0.4
|
|
||||||
- imgaug=0.4
|
|
||||||
- kornia=0.6
|
|
||||||
- mpmath=1.2
|
|
||||||
- nomkl=3
|
|
||||||
- numpy=1.23
|
|
||||||
- omegaconf=2.1
|
|
||||||
- openh264=2.3
|
|
||||||
- onnx=1.12
|
|
||||||
- onnxruntime=1.12
|
|
||||||
- pudb=2019.2
|
|
||||||
- protobuf=3.20
|
|
||||||
- py-opencv=4.6
|
|
||||||
- scipy=1.9
|
|
||||||
- streamlit=1.12
|
|
||||||
- sympy=1.10
|
|
||||||
- send2trash=1.8
|
|
||||||
- tensorboard=2.10
|
|
||||||
- transformers~=4.25
|
|
||||||
- pip:
|
|
||||||
- diffusers[torch]~=0.11
|
|
||||||
- safetensors~=0.2
|
|
||||||
- getpass_asterisk
|
|
||||||
- huggingface-hub
|
|
||||||
- picklescan
|
|
||||||
- requests==2.25.1
|
|
||||||
- taming-transformers-rom1504
|
|
||||||
- test-tube==0.7.5
|
|
||||||
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
|
||||||
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
|
|
||||||
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
|
||||||
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
|
|
||||||
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
|
|
||||||
- -e .
|
|
||||||
variables:
|
|
||||||
PYTORCH_ENABLE_MPS_FALLBACK: 1
|
|
@ -1,51 +0,0 @@
|
|||||||
name: invokeai
|
|
||||||
channels:
|
|
||||||
- pytorch
|
|
||||||
- conda-forge
|
|
||||||
- defaults
|
|
||||||
dependencies:
|
|
||||||
- python=3.10.*
|
|
||||||
- pip=22.2.2
|
|
||||||
- numpy=1.23.3
|
|
||||||
- torchvision=0.13.1
|
|
||||||
- torchaudio=0.12.1
|
|
||||||
- pytorch=1.12.1
|
|
||||||
- cudatoolkit=11.6
|
|
||||||
- pip:
|
|
||||||
- accelerate
|
|
||||||
- albumentations==0.4.3
|
|
||||||
- diffusers[torch]~=0.11
|
|
||||||
- einops==0.3.0
|
|
||||||
- eventlet
|
|
||||||
- flask==2.1.3
|
|
||||||
- flask_cors==3.0.10
|
|
||||||
- flask_socketio==5.3.0
|
|
||||||
- getpass_asterisk
|
|
||||||
- huggingface-hub>=0.11.1
|
|
||||||
- imageio-ffmpeg==0.4.2
|
|
||||||
- imageio==2.9.0
|
|
||||||
- kornia==0.6.0
|
|
||||||
- omegaconf==2.2.3
|
|
||||||
- opencv-python==4.5.5.64
|
|
||||||
- picklescan
|
|
||||||
- pillow==9.2.0
|
|
||||||
- pudb==2019.2
|
|
||||||
- pyreadline3
|
|
||||||
- pytorch-lightning==1.7.7
|
|
||||||
- realesrgan
|
|
||||||
- requests==2.25.1
|
|
||||||
- safetensors
|
|
||||||
- send2trash==1.8.0
|
|
||||||
- streamlit==1.12.0
|
|
||||||
- taming-transformers-rom1504
|
|
||||||
- test-tube>=0.7.5
|
|
||||||
- torch-fidelity==0.3.0
|
|
||||||
- torchmetrics==0.7.0
|
|
||||||
- transformers~=4.25
|
|
||||||
- windows-curses
|
|
||||||
- git+https://github.com/openai/CLIP.git@main#egg=clip
|
|
||||||
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
|
||||||
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
|
|
||||||
- git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.1#egg=gfpgan
|
|
||||||
- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
|
|
||||||
- -e .
|
|
@ -1,46 +0,0 @@
|
|||||||
# pip will resolve the version which matches torch
|
|
||||||
accelerate
|
|
||||||
albumentations
|
|
||||||
datasets
|
|
||||||
diffusers[torch]~=0.12
|
|
||||||
dnspython==2.2.1
|
|
||||||
einops
|
|
||||||
eventlet
|
|
||||||
facexlib
|
|
||||||
flask==2.1.3
|
|
||||||
flask_cors==3.0.10
|
|
||||||
flask_socketio==5.3.0
|
|
||||||
flaskwebgui==1.0.3
|
|
||||||
getpass_asterisk
|
|
||||||
gfpgan==1.3.8
|
|
||||||
huggingface-hub>=0.11.1
|
|
||||||
imageio
|
|
||||||
imageio-ffmpeg
|
|
||||||
kornia
|
|
||||||
npyscreen
|
|
||||||
numpy==1.23.*
|
|
||||||
omegaconf
|
|
||||||
opencv-python
|
|
||||||
picklescan
|
|
||||||
pillow
|
|
||||||
pip>=22
|
|
||||||
prompt-toolkit
|
|
||||||
pudb
|
|
||||||
pyreadline3
|
|
||||||
pytorch-lightning==1.7.7
|
|
||||||
realesrgan
|
|
||||||
requests==2.25.1
|
|
||||||
safetensors
|
|
||||||
scikit-image>=0.19
|
|
||||||
send2trash
|
|
||||||
streamlit
|
|
||||||
taming-transformers-rom1504
|
|
||||||
test-tube>=0.7.5
|
|
||||||
torch-fidelity
|
|
||||||
torchmetrics
|
|
||||||
transformers~=4.26
|
|
||||||
windows-curses; sys_platform == 'win32'
|
|
||||||
https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip#egg=k-diffusion
|
|
||||||
https://github.com/invoke-ai/PyPatchMatch/archive/refs/tags/0.1.5.zip#egg=pypatchmatch
|
|
||||||
https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip#egg=clip
|
|
||||||
https://github.com/invoke-ai/clipseg/archive/relaxed-python-requirement.zip#egg=clipseg
|
|
@ -1,6 +0,0 @@
|
|||||||
-r environments-and-requirements/requirements-base.txt
|
|
||||||
# Get hardware-appropriate torch/torchvision
|
|
||||||
--extra-index-url https://download.pytorch.org/whl/rocm5.2 --trusted-host https://download.pytorch.org
|
|
||||||
torch>=1.13.1
|
|
||||||
torchvision>=0.14.1
|
|
||||||
-e .
|
|
@ -1,6 +0,0 @@
|
|||||||
--trusted-host https://download.pytorch.org
|
|
||||||
-r environments-and-requirements/requirements-base.txt
|
|
||||||
torch>=1.13.1
|
|
||||||
torchvision>=0.14.1
|
|
||||||
xformers~=0.0.16
|
|
||||||
-e .
|
|
@ -1,6 +0,0 @@
|
|||||||
-r environments-and-requirements/requirements-base.txt
|
|
||||||
grpcio<1.51.0
|
|
||||||
protobuf==3.20.3
|
|
||||||
torch>=1.13.1
|
|
||||||
torchvision>=0.14.1
|
|
||||||
-e .
|
|
@ -1,7 +0,0 @@
|
|||||||
-r environments-and-requirements/requirements-base.txt
|
|
||||||
# Get hardware-appropriate torch/torchvision
|
|
||||||
--extra-index-url https://download.pytorch.org/whl/cu117 --trusted-host https://download.pytorch.org
|
|
||||||
torch==1.13.1
|
|
||||||
torchvision==0.14.1
|
|
||||||
xformers~=0.0.16
|
|
||||||
-e .
|
|
@ -11,25 +11,28 @@ if [[ -v "VIRTUAL_ENV" ]]; then
|
|||||||
exit -1
|
exit -1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
VERSION=$(cd ..; python -c "from ldm.invoke import __version__ as version; print(version)")
|
VERSION=$(cd ..; python -c "from invokeai.version import __version__ as version; print(version)")
|
||||||
PATCH=""
|
PATCH=""
|
||||||
VERSION="v${VERSION}${PATCH}"
|
VERSION="v${VERSION}${PATCH}"
|
||||||
|
LATEST_TAG="v3.0-latest"
|
||||||
|
|
||||||
echo Building installer for version $VERSION
|
echo Building installer for version $VERSION
|
||||||
echo "Be certain that you're in the 'installer' directory before continuing."
|
echo "Be certain that you're in the 'installer' directory before continuing."
|
||||||
read -p "Press any key to continue, or CTRL-C to exit..."
|
read -p "Press any key to continue, or CTRL-C to exit..."
|
||||||
|
|
||||||
read -e -p "Commit and tag this repo with ${VERSION} and 'latest'? [n]: " input
|
read -e -p "Tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
|
||||||
RESPONSE=${input:='n'}
|
RESPONSE=${input:='n'}
|
||||||
if [ "$RESPONSE" == 'y' ]; then
|
if [ "$RESPONSE" == 'y' ]; then
|
||||||
git commit -a
|
|
||||||
|
|
||||||
if ! git tag $VERSION ; then
|
if ! git tag $VERSION ; then
|
||||||
echo "Existing/invalid tag"
|
echo "Existing/invalid tag"
|
||||||
exit -1
|
exit -1
|
||||||
fi
|
fi
|
||||||
git push origin :refs/tags/latest
|
|
||||||
git tag -fa latest
|
git push origin :refs/tags/$LATEST_TAG
|
||||||
|
git tag -fa $LATEST_TAG
|
||||||
|
|
||||||
|
echo "remember to push --tags!"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# ----------------------
|
# ----------------------
|
||||||
@ -54,12 +57,12 @@ rm -rf InvokeAI-Installer
|
|||||||
|
|
||||||
# copy content
|
# copy content
|
||||||
mkdir InvokeAI-Installer
|
mkdir InvokeAI-Installer
|
||||||
for f in templates *.py *.txt *.reg; do
|
for f in templates lib *.txt *.reg; do
|
||||||
cp -r ${f} InvokeAI-Installer/
|
cp -r ${f} InvokeAI-Installer/
|
||||||
done
|
done
|
||||||
|
|
||||||
# Move the wheel
|
# Move the wheel
|
||||||
mv dist/*.whl InvokeAI-Installer/
|
mv dist/*.whl InvokeAI-Installer/lib/
|
||||||
|
|
||||||
# Install scripts
|
# Install scripts
|
||||||
# Mac/Linux
|
# Mac/Linux
|
||||||
@ -73,17 +76,6 @@ cp WinLongPathsEnabled.reg InvokeAI-Installer/
|
|||||||
# Zip everything up
|
# Zip everything up
|
||||||
zip -r InvokeAI-installer-$VERSION.zip InvokeAI-Installer
|
zip -r InvokeAI-installer-$VERSION.zip InvokeAI-Installer
|
||||||
|
|
||||||
# Updater
|
|
||||||
mkdir tmp
|
|
||||||
cp templates/update.sh.in tmp/update.sh
|
|
||||||
cp templates/update.bat.in tmp/update.bat
|
|
||||||
chmod +x tmp/update.sh
|
|
||||||
chmod +x tmp/update.bat
|
|
||||||
cd tmp
|
|
||||||
zip InvokeAI-updater-$VERSION.zip update.sh update.bat
|
|
||||||
cd ..
|
|
||||||
mv tmp/InvokeAI-updater-$VERSION.zip .
|
|
||||||
|
|
||||||
# clean up
|
# clean up
|
||||||
rm -rf InvokeAI-Installer tmp dist
|
rm -rf InvokeAI-Installer tmp dist
|
||||||
|
|
||||||
|
@ -66,8 +66,9 @@ del /q .tmp1 .tmp2
|
|||||||
|
|
||||||
@rem -------------- Install and Configure ---------------
|
@rem -------------- Install and Configure ---------------
|
||||||
|
|
||||||
call python main.py
|
call python .\lib\main.py
|
||||||
|
pause
|
||||||
|
exit /b
|
||||||
|
|
||||||
@rem ------------------------ Subroutines ---------------
|
@rem ------------------------ Subroutines ---------------
|
||||||
@rem routine to do comparison of semantic version numbers
|
@rem routine to do comparison of semantic version numbers
|
||||||
|
29
installer/install.sh.in
Normal file → Executable file
@ -3,5 +3,32 @@
|
|||||||
# make sure we are not already in a venv
|
# make sure we are not already in a venv
|
||||||
# (don't need to check status)
|
# (don't need to check status)
|
||||||
deactivate >/dev/null 2>&1
|
deactivate >/dev/null 2>&1
|
||||||
|
scriptdir=$(dirname "$0")
|
||||||
|
cd $scriptdir
|
||||||
|
|
||||||
exec python3 $(dirname $0)/main.py ${@}
|
function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }
|
||||||
|
|
||||||
|
MINIMUM_PYTHON_VERSION=3.9.0
|
||||||
|
MAXIMUM_PYTHON_VERSION=3.11.0
|
||||||
|
PYTHON=""
|
||||||
|
for candidate in python3.10 python3.9 python3 python ; do
|
||||||
|
if ppath=`which $candidate`; then
|
||||||
|
python_version=$($ppath -V | awk '{ print $2 }')
|
||||||
|
if [ $(version $python_version) -ge $(version "$MINIMUM_PYTHON_VERSION") ]; then
|
||||||
|
if [ $(version $python_version) -lt $(version "$MAXIMUM_PYTHON_VERSION") ]; then
|
||||||
|
PYTHON=$ppath
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ -z "$PYTHON" ]; then
|
||||||
|
echo "A suitable Python interpreter could not be found"
|
||||||
|
echo "Please install Python 3.9 or higher before running this script. See instructions at $INSTRUCTIONS for help."
|
||||||
|
read -p "Press any key to exit"
|
||||||
|
exit -1
|
||||||
|
fi
|
||||||
|
|
||||||
|
exec $PYTHON ./lib/main.py ${@}
|
||||||
|
read -p "Press any key to exit"
|
||||||
|
@ -1,443 +0,0 @@
|
|||||||
# Copyright (c) 2023 Eugene Brodsky (https://github.com/ebr)
|
|
||||||
"""
|
|
||||||
InvokeAI installer script
|
|
||||||
"""
|
|
||||||
|
|
||||||
import os
|
|
||||||
import platform
|
|
||||||
import shutil
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
import venv
|
|
||||||
from pathlib import Path
|
|
||||||
from tempfile import TemporaryDirectory
|
|
||||||
from typing import Union
|
|
||||||
|
|
||||||
SUPPORTED_PYTHON = ">=3.9.0,<3.11"
|
|
||||||
INSTALLER_REQS = ["rich", "semver", "requests", "plumbum", "prompt-toolkit"]
|
|
||||||
BOOTSTRAP_VENV_PREFIX = "invokeai-installer-tmp"
|
|
||||||
|
|
||||||
OS = platform.uname().system
|
|
||||||
ARCH = platform.uname().machine
|
|
||||||
VERSION = "latest"
|
|
||||||
|
|
||||||
### Feature flags
|
|
||||||
# Install the virtualenv into the runtime dir
|
|
||||||
FF_VENV_IN_RUNTIME = True
|
|
||||||
|
|
||||||
# Install the wheel packaged with the installer
|
|
||||||
FF_USE_LOCAL_WHEEL = True
|
|
||||||
|
|
||||||
|
|
||||||
class Installer:
|
|
||||||
"""
|
|
||||||
Deploys an InvokeAI installation into a given path
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self) -> None:
|
|
||||||
self.reqs = INSTALLER_REQS
|
|
||||||
self.preflight()
|
|
||||||
if os.getenv("VIRTUAL_ENV") is not None:
|
|
||||||
raise NotImplementedError("A virtual environment is already activated. Please 'deactivate' before installation.")
|
|
||||||
self.bootstrap()
|
|
||||||
|
|
||||||
def preflight(self) -> None:
|
|
||||||
"""
|
|
||||||
Preflight checks
|
|
||||||
"""
|
|
||||||
|
|
||||||
# TODO
|
|
||||||
# verify python version
|
|
||||||
# on macOS verify XCode tools are present
|
|
||||||
# verify libmesa, libglx on linux
|
|
||||||
# check that the system arch is not i386 (?)
|
|
||||||
# check that the system has a GPU, and the type of GPU
|
|
||||||
|
|
||||||
pass
|
|
||||||
|
|
||||||
def mktemp_venv(self) -> TemporaryDirectory:
|
|
||||||
"""
|
|
||||||
Creates a temporary virtual environment for the installer itself
|
|
||||||
|
|
||||||
:return: path to the created virtual environment directory
|
|
||||||
:rtype: TemporaryDirectory
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Cleaning up temporary directories on Windows results in a race condition
|
|
||||||
# and a stack trace.
|
|
||||||
# `ignore_cleanup_errors` was only added in Python 3.10
|
|
||||||
# users of Python 3.9 will see a gnarly stack trace on installer exit
|
|
||||||
if OS == "Windows" and int(platform.python_version_tuple()[1]) >= 10:
|
|
||||||
venv_dir = TemporaryDirectory(prefix=BOOTSTRAP_VENV_PREFIX, ignore_cleanup_errors=True)
|
|
||||||
else:
|
|
||||||
venv_dir = TemporaryDirectory(prefix=BOOTSTRAP_VENV_PREFIX)
|
|
||||||
|
|
||||||
venv.create(venv_dir.name, with_pip=True)
|
|
||||||
self.venv_dir = venv_dir
|
|
||||||
set_sys_path(Path(venv_dir.name))
|
|
||||||
|
|
||||||
return venv_dir
|
|
||||||
|
|
||||||
def bootstrap(self, verbose: bool = False) -> TemporaryDirectory:
|
|
||||||
"""
|
|
||||||
Bootstrap the installer venv with packages required at install time
|
|
||||||
|
|
||||||
:return: path to the virtual environment directory that was bootstrapped
|
|
||||||
:rtype: TemporaryDirectory
|
|
||||||
"""
|
|
||||||
|
|
||||||
print("Initializing the installer. This may take a minute - please wait...")
|
|
||||||
|
|
||||||
venv_dir = self.mktemp_venv()
|
|
||||||
pip = get_pip_from_venv(Path(venv_dir.name))
|
|
||||||
|
|
||||||
cmd = [pip, "install", "--require-virtualenv", "--use-pep517"]
|
|
||||||
cmd.extend(self.reqs)
|
|
||||||
|
|
||||||
try:
|
|
||||||
res = subprocess.check_output(cmd).decode()
|
|
||||||
if verbose:
|
|
||||||
print(res)
|
|
||||||
return venv_dir
|
|
||||||
except subprocess.CalledProcessError as e:
|
|
||||||
print(e)
|
|
||||||
|
|
||||||
def app_venv(self, path: str = None):
|
|
||||||
"""
|
|
||||||
Create a virtualenv for the InvokeAI installation
|
|
||||||
"""
|
|
||||||
|
|
||||||
# explicit venv location
|
|
||||||
# currently unused in normal operation
|
|
||||||
# useful for testing or special cases
|
|
||||||
if path is not None:
|
|
||||||
venv_dir = Path(path)
|
|
||||||
|
|
||||||
# experimental / testing
|
|
||||||
elif not FF_VENV_IN_RUNTIME:
|
|
||||||
if OS == "Windows":
|
|
||||||
venv_dir_parent = os.getenv("APPDATA", "~/AppData/Roaming")
|
|
||||||
elif OS == "Darwin":
|
|
||||||
# there is no environment variable on macOS to find this
|
|
||||||
# TODO: confirm this is working as expected
|
|
||||||
venv_dir_parent = "~/Library/Application Support"
|
|
||||||
elif OS == "Linux":
|
|
||||||
venv_dir_parent = os.getenv("XDG_DATA_DIR", "~/.local/share")
|
|
||||||
venv_dir = Path(venv_dir_parent).expanduser().resolve() / f"InvokeAI/{VERSION}/venv"
|
|
||||||
|
|
||||||
# stable / current
|
|
||||||
else:
|
|
||||||
venv_dir = self.dest / ".venv"
|
|
||||||
|
|
||||||
venv.create(venv_dir, with_pip=True)
|
|
||||||
|
|
||||||
# upgrade pip in Python 3.9 environments
|
|
||||||
if int(platform.python_version_tuple()[1]) == 9:
|
|
||||||
|
|
||||||
from plumbum import FG, local
|
|
||||||
|
|
||||||
pip = local[get_pip_from_venv(venv_dir)]
|
|
||||||
pip[ "install", "--upgrade", "pip"] & FG
|
|
||||||
|
|
||||||
return venv_dir
|
|
||||||
|
|
||||||
def install(self, root: str = "~/invokeai", version: str = "latest", yes_to_all=False, find_links: Path = None) -> None:
|
|
||||||
"""
|
|
||||||
Install the InvokeAI application into the given runtime path
|
|
||||||
|
|
||||||
:param root: Destination path for the installation
|
|
||||||
:type root: str
|
|
||||||
:param version: InvokeAI version to install
|
|
||||||
:type version: str
|
|
||||||
:param yes: Accept defaults to all questions
|
|
||||||
:type yes: bool
|
|
||||||
:param find_links: A local directory to search for requirement wheels before going to remote indexes
|
|
||||||
:type find_links: Path
|
|
||||||
"""
|
|
||||||
|
|
||||||
import messages
|
|
||||||
|
|
||||||
messages.welcome()
|
|
||||||
|
|
||||||
self.dest = Path(root).expanduser().resolve() if yes_to_all else messages.dest_path(root)
|
|
||||||
|
|
||||||
# create the venv for the app
|
|
||||||
self.venv = self.app_venv()
|
|
||||||
|
|
||||||
self.instance = InvokeAiInstance(runtime=self.dest, venv=self.venv, version=version)
|
|
||||||
|
|
||||||
# install dependencies and the InvokeAI application
|
|
||||||
(extra_index_url,optional_modules) = get_torch_source() if not yes_to_all else (None,None)
|
|
||||||
self.instance.install(
|
|
||||||
extra_index_url,
|
|
||||||
optional_modules,
|
|
||||||
find_links,
|
|
||||||
)
|
|
||||||
|
|
||||||
# install the launch/update scripts into the runtime directory
|
|
||||||
self.instance.install_user_scripts()
|
|
||||||
|
|
||||||
# run through the configuration flow
|
|
||||||
self.instance.configure()
|
|
||||||
|
|
||||||
class InvokeAiInstance:
|
|
||||||
"""
|
|
||||||
Manages an installed instance of InvokeAI, comprising a virtual environment and a runtime directory.
|
|
||||||
The virtual environment *may* reside within the runtime directory.
|
|
||||||
A single runtime directory *may* be shared by multiple virtual environments, though this isn't currently tested or supported.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, runtime: Path, venv: Path, version: str) -> None:
|
|
||||||
|
|
||||||
self.runtime = runtime
|
|
||||||
self.venv = venv
|
|
||||||
self.pip = get_pip_from_venv(venv)
|
|
||||||
self.version = version
|
|
||||||
|
|
||||||
set_sys_path(venv)
|
|
||||||
os.environ["INVOKEAI_ROOT"] = str(self.runtime.expanduser().resolve())
|
|
||||||
os.environ["VIRTUAL_ENV"] = str(self.venv.expanduser().resolve())
|
|
||||||
|
|
||||||
def get(self) -> tuple[Path, Path]:
|
|
||||||
"""
|
|
||||||
Get the location of the virtualenv directory for this installation
|
|
||||||
|
|
||||||
:return: Paths of the runtime and the venv directory
|
|
||||||
:rtype: tuple[Path, Path]
|
|
||||||
"""
|
|
||||||
|
|
||||||
return (self.runtime, self.venv)
|
|
||||||
|
|
||||||
def install(self, extra_index_url=None, optional_modules=None, find_links=None):
|
|
||||||
"""
|
|
||||||
Install this instance, including dependencies and the app itself
|
|
||||||
|
|
||||||
:param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
|
|
||||||
:type extra_index_url: str
|
|
||||||
"""
|
|
||||||
|
|
||||||
import messages
|
|
||||||
|
|
||||||
# install torch first to ensure the correct version gets installed.
|
|
||||||
# works with either source or wheel install with negligible impact on installation times.
|
|
||||||
messages.simple_banner("Installing PyTorch :fire:")
|
|
||||||
self.install_torch(extra_index_url, find_links)
|
|
||||||
|
|
||||||
messages.simple_banner("Installing the InvokeAI Application :art:")
|
|
||||||
self.install_app(extra_index_url, optional_modules, find_links)
|
|
||||||
|
|
||||||
def install_torch(self, extra_index_url=None, find_links=None):
|
|
||||||
"""
|
|
||||||
Install PyTorch
|
|
||||||
"""
|
|
||||||
|
|
||||||
from plumbum import FG, local
|
|
||||||
|
|
||||||
pip = local[self.pip]
|
|
||||||
|
|
||||||
(
|
|
||||||
pip[
|
|
||||||
"install",
|
|
||||||
"--require-virtualenv",
|
|
||||||
"torch",
|
|
||||||
"torchvision",
|
|
||||||
"--find-links" if find_links is not None else None,
|
|
||||||
find_links,
|
|
||||||
"--extra-index-url" if extra_index_url is not None else None,
|
|
||||||
extra_index_url,
|
|
||||||
]
|
|
||||||
& FG
|
|
||||||
)
|
|
||||||
|
|
||||||
def install_app(self, extra_index_url=None, optional_modules=None, find_links=None):
|
|
||||||
"""
|
|
||||||
Install the application with pip.
|
|
||||||
Supports installation from PyPi or from a local source directory.
|
|
||||||
|
|
||||||
:param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
|
|
||||||
:type extra_index_url: str
|
|
||||||
|
|
||||||
:param optional_modules: optional modules to install using "[module1,module2]" format.
|
|
||||||
:type optional_modules: str
|
|
||||||
|
|
||||||
:param find_links: path to a directory containing wheels to be searched prior to going to the internet
|
|
||||||
:type find_links: Path
|
|
||||||
"""
|
|
||||||
|
|
||||||
## this only applies to pypi installs; TODO actually use this
|
|
||||||
if self.version == "pre":
|
|
||||||
version = None
|
|
||||||
pre = "--pre"
|
|
||||||
else:
|
|
||||||
version = self.version
|
|
||||||
pre = None
|
|
||||||
|
|
||||||
## TODO: only local wheel will be installed as of now; support for --version arg is TODO
|
|
||||||
if FF_USE_LOCAL_WHEEL:
|
|
||||||
# if no wheel, try to do a source install before giving up
|
|
||||||
try:
|
|
||||||
src = str(next(Path.cwd().glob("InvokeAI-*.whl")))
|
|
||||||
except StopIteration:
|
|
||||||
try:
|
|
||||||
src = Path(__file__).parents[1].expanduser().resolve()
|
|
||||||
# if the above directory contains one of these files, we'll do a source install
|
|
||||||
next(src.glob("pyproject.toml"))
|
|
||||||
next(src.glob("ldm"))
|
|
||||||
except StopIteration:
|
|
||||||
print("Unable to find a wheel or perform a source install. Giving up.")
|
|
||||||
|
|
||||||
elif version == "source":
|
|
||||||
# this makes an assumption about the location of the installer package in the source tree
|
|
||||||
src = Path(__file__).parents[1].expanduser().resolve()
|
|
||||||
else:
|
|
||||||
# will install from PyPi
|
|
||||||
src = f"invokeai=={version}" if version is not None else "invokeai"
|
|
||||||
|
|
||||||
from plumbum import FG, local
|
|
||||||
|
|
||||||
pip = local[self.pip]
|
|
||||||
|
|
||||||
(
|
|
||||||
pip[
|
|
||||||
"install",
|
|
||||||
"--require-virtualenv",
|
|
||||||
"--use-pep517",
|
|
||||||
str(src)+(optional_modules if optional_modules else ''),
|
|
||||||
"--find-links" if find_links is not None else None,
|
|
||||||
find_links,
|
|
||||||
"--extra-index-url" if extra_index_url is not None else None,
|
|
||||||
extra_index_url,
|
|
||||||
pre,
|
|
||||||
]
|
|
||||||
& FG
|
|
||||||
)
|
|
||||||
|
|
||||||
def configure(self):
|
|
||||||
"""
|
|
||||||
Configure the InvokeAI runtime directory
|
|
||||||
"""
|
|
||||||
|
|
||||||
new_argv = [sys.argv[0]]
|
|
||||||
for i in range(1,len(sys.argv)):
|
|
||||||
el = sys.argv[i]
|
|
||||||
if el in ['-r','--root']:
|
|
||||||
new_argv.append(el)
|
|
||||||
new_argv.append(sys.argv[i+1])
|
|
||||||
elif el in ['-y','--yes','--yes-to-all']:
|
|
||||||
new_argv.append(el)
|
|
||||||
sys.argv = new_argv
|
|
||||||
|
|
||||||
from messages import introduction
|
|
||||||
|
|
||||||
introduction()
|
|
||||||
|
|
||||||
from ldm.invoke.config import configure_invokeai
|
|
||||||
|
|
||||||
# NOTE: currently the config script does its own arg parsing! this means the command-line switches
|
|
||||||
# from the installer will also automatically propagate down to the config script.
|
|
||||||
# this may change in the future with config refactoring!
|
|
||||||
|
|
||||||
# set sys.argv to a consistent state
|
|
||||||
|
|
||||||
configure_invokeai.main()
|
|
||||||
|
|
||||||
def install_user_scripts(self):
|
|
||||||
"""
|
|
||||||
Copy the launch and update scripts to the runtime dir
|
|
||||||
"""
|
|
||||||
|
|
||||||
ext = "bat" if OS == "Windows" else "sh"
|
|
||||||
|
|
||||||
for script in ["invoke", "update"]:
|
|
||||||
src = Path(__file__).parent / "templates" / f"{script}.{ext}.in"
|
|
||||||
dest = self.runtime / f"{script}.{ext}"
|
|
||||||
shutil.copy(src, dest)
|
|
||||||
os.chmod(dest, 0o0755)
|
|
||||||
|
|
||||||
def update(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def remove(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
### Utility functions ###
|
|
||||||
|
|
||||||
|
|
||||||
def get_pip_from_venv(venv_path: Path) -> str:
|
|
||||||
"""
|
|
||||||
Given a path to a virtual environment, get the absolute path to the `pip` executable
|
|
||||||
in a cross-platform fashion. Does not validate that the pip executable
|
|
||||||
actually exists in the virtualenv.
|
|
||||||
|
|
||||||
:param venv_path: Path to the virtual environment
|
|
||||||
:type venv_path: Path
|
|
||||||
:return: Absolute path to the pip executable
|
|
||||||
:rtype: str
|
|
||||||
"""
|
|
||||||
|
|
||||||
pip = "Scripts\pip.exe" if OS == "Windows" else "bin/pip"
|
|
||||||
return str(venv_path.expanduser().resolve() / pip)
|
|
||||||
|
|
||||||
|
|
||||||
def set_sys_path(venv_path: Path) -> None:
|
|
||||||
"""
|
|
||||||
Given a path to a virtual environment, set the sys.path, in a cross-platform fashion,
|
|
||||||
such that packages from the given venv may be imported in the current process.
|
|
||||||
Ensure that the packages from system environment are not visible (emulate
|
|
||||||
the virtual env 'activate' script) - this doesn't work on Windows yet.
|
|
||||||
|
|
||||||
:param venv_path: Path to the virtual environment
|
|
||||||
:type venv_path: Path
|
|
||||||
"""
|
|
||||||
|
|
||||||
# filter out any paths in sys.path that may be system- or user-wide
|
|
||||||
# but leave the temporary bootstrap virtualenv as it contains packages we
|
|
||||||
# temporarily need at install time
|
|
||||||
sys.path = list(filter(
|
|
||||||
lambda p: not p.endswith("-packages")
|
|
||||||
or p.find(BOOTSTRAP_VENV_PREFIX) != -1,
|
|
||||||
sys.path
|
|
||||||
))
|
|
||||||
|
|
||||||
# determine site-packages/lib directory location for the venv
|
|
||||||
lib = "Lib" if OS == "Windows" else f"lib/python{sys.version_info.major}.{sys.version_info.minor}"
|
|
||||||
|
|
||||||
# add the site-packages location to the venv
|
|
||||||
sys.path.append(str(Path(venv_path, lib, "site-packages").expanduser().resolve()))
|
|
||||||
|
|
||||||
|
|
||||||
def get_torch_source() -> (Union[str, None],str):
|
|
||||||
"""
|
|
||||||
Determine the extra index URL for pip to use for torch installation.
|
|
||||||
This depends on the OS and the graphics accelerator in use.
|
|
||||||
This is only applicable to Windows and Linux, since PyTorch does not
|
|
||||||
offer accelerated builds for macOS.
|
|
||||||
|
|
||||||
Prefer CUDA-enabled wheels if the user wasn't sure of their GPU, as it will fallback to CPU if possible.
|
|
||||||
|
|
||||||
A NoneType return means just go to PyPi.
|
|
||||||
|
|
||||||
:return: tuple consisting of (extra index url or None, optional modules to load or None)
|
|
||||||
:rtype: list
|
|
||||||
"""
|
|
||||||
|
|
||||||
from messages import graphical_accelerator
|
|
||||||
|
|
||||||
# device can be one of: "cuda", "rocm", "cpu", "idk"
|
|
||||||
device = graphical_accelerator()
|
|
||||||
|
|
||||||
url = None
|
|
||||||
optional_modules = None
|
|
||||||
if OS == "Linux":
|
|
||||||
if device == "rocm":
|
|
||||||
url = "https://download.pytorch.org/whl/rocm5.2"
|
|
||||||
elif device == "cpu":
|
|
||||||
url = "https://download.pytorch.org/whl/cpu"
|
|
||||||
|
|
||||||
if device == 'cuda':
|
|
||||||
optional_modules = '[xformers]'
|
|
||||||
|
|
||||||
# in all other cases, Torch wheels should be coming from PyPi as of Torch 1.13
|
|
||||||
|
|
||||||
return (url, optional_modules)
|
|
469
installer/lib/installer.py
Normal file
@ -0,0 +1,469 @@
|
|||||||
|
# Copyright (c) 2023 Eugene Brodsky (https://github.com/ebr)
|
||||||
|
"""
|
||||||
|
InvokeAI installer script
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import platform
|
||||||
|
import shutil
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import venv
|
||||||
|
from pathlib import Path
|
||||||
|
from tempfile import TemporaryDirectory
|
||||||
|
from typing import Union
|
||||||
|
|
||||||
|
SUPPORTED_PYTHON = ">=3.9.0,<3.11"
|
||||||
|
INSTALLER_REQS = ["rich", "semver", "requests", "plumbum", "prompt-toolkit"]
|
||||||
|
BOOTSTRAP_VENV_PREFIX = "invokeai-installer-tmp"
|
||||||
|
|
||||||
|
OS = platform.uname().system
|
||||||
|
ARCH = platform.uname().machine
|
||||||
|
VERSION = "latest"
|
||||||
|
|
||||||
|
### Feature flags
|
||||||
|
# Install the virtualenv into the runtime dir
|
||||||
|
FF_VENV_IN_RUNTIME = True
|
||||||
|
|
||||||
|
# Install the wheel packaged with the installer
|
||||||
|
FF_USE_LOCAL_WHEEL = True
|
||||||
|
|
||||||
|
|
||||||
|
class Installer:
    """
    Deploys an InvokeAI installation into a given path.

    Instantiating this class performs the preflight checks and bootstraps a
    temporary virtual environment holding the installer's own dependencies.
    """

    def __init__(self) -> None:
        self.reqs = INSTALLER_REQS
        self.preflight()
        # Refuse to run inside an activated venv: the installer creates and
        # manages its own environments and an active one would interfere.
        if os.getenv("VIRTUAL_ENV") is not None:
            print("A virtual environment is already activated. Please 'deactivate' before installation.")
            sys.exit(-1)
        self.bootstrap()

    def preflight(self) -> None:
        """
        Preflight checks (currently a no-op placeholder).
        """

        # TODO
        # verify python version
        # on macOS verify XCode tools are present
        # verify libmesa, libglx on linux
        # check that the system arch is not i386 (?)
        # check that the system has a GPU, and the type of GPU

        pass

    def mktemp_venv(self) -> TemporaryDirectory:
        """
        Creates a temporary virtual environment for the installer itself.

        :return: path to the created virtual environment directory
        :rtype: TemporaryDirectory
        """

        # Cleaning up temporary directories on Windows results in a race condition
        # and a stack trace.
        # `ignore_cleanup_errors` was only added in Python 3.10;
        # users of Python 3.9 will see a gnarly stack trace on installer exit.
        if OS == "Windows" and int(platform.python_version_tuple()[1]) >= 10:
            venv_dir = TemporaryDirectory(prefix=BOOTSTRAP_VENV_PREFIX, ignore_cleanup_errors=True)
        else:
            venv_dir = TemporaryDirectory(prefix=BOOTSTRAP_VENV_PREFIX)

        venv.create(venv_dir.name, with_pip=True)
        # Keep a reference on self so the TemporaryDirectory object (and thus
        # the directory) is not garbage-collected while we still need it.
        self.venv_dir = venv_dir
        set_sys_path(Path(venv_dir.name))

        return venv_dir

    def bootstrap(self, verbose: bool = False) -> TemporaryDirectory:
        """
        Bootstrap the installer venv with packages required at install time.

        :param verbose: echo pip's output when True
        :type verbose: bool
        :return: path to the virtual environment directory that was bootstrapped,
            or None if the pip invocation failed (the error is printed)
        :rtype: TemporaryDirectory
        """

        print("Initializing the installer. This may take a minute - please wait...")

        venv_dir = self.mktemp_venv()
        pip = get_pip_from_venv(Path(venv_dir.name))

        cmd = [pip, "install", "--require-virtualenv", "--use-pep517"]
        cmd.extend(self.reqs)

        try:
            res = subprocess.check_output(cmd).decode()
            if verbose:
                print(res)
            return venv_dir
        except subprocess.CalledProcessError as e:
            # NOTE(review): the failure is reported but not fatal here, so the
            # installer continues with an incomplete bootstrap venv and will
            # most likely fail later. Consider aborting instead.
            print(e)

    def app_venv(self, path: str = None):
        """
        Create a virtualenv for the InvokeAI installation.

        :param path: explicit venv location; unused in normal operation,
            useful for testing or special cases. Defaults to None.
        :type path: str, optional
        :return: path to the created virtual environment
        :rtype: Path
        """

        # explicit venv location
        # currently unused in normal operation
        # useful for testing or special cases
        if path is not None:
            venv_dir = Path(path)

        # experimental / testing
        elif not FF_VENV_IN_RUNTIME:
            if OS == "Windows":
                venv_dir_parent = os.getenv("APPDATA", "~/AppData/Roaming")
            elif OS == "Darwin":
                # there is no environment variable on macOS to find this
                # TODO: confirm this is working as expected
                venv_dir_parent = "~/Library/Application Support"
            elif OS == "Linux":
                venv_dir_parent = os.getenv("XDG_DATA_DIR", "~/.local/share")
            venv_dir = Path(venv_dir_parent).expanduser().resolve() / f"InvokeAI/{VERSION}/venv"

        # stable / current
        else:
            venv_dir = self.dest / ".venv"

        # Prefer to copy python executables
        # so that updates to system python don't break InvokeAI.
        try:
            venv.create(venv_dir, with_pip=True)
        # If installing over an existing environment previously created with symlinks,
        # the executables will fail to copy. Keep symlinks in that case.
        except shutil.SameFileError:
            venv.create(venv_dir, with_pip=True, symlinks=True)

        # upgrade pip in Python 3.9 environments
        if int(platform.python_version_tuple()[1]) == 9:

            from plumbum import FG, local

            pip = local[get_pip_from_venv(venv_dir)]
            pip["install", "--upgrade", "pip"] & FG

        return venv_dir

    def install(self, root: str = "~/invokeai", version: str = "latest", yes_to_all=False, find_links: Path = None) -> None:
        """
        Install the InvokeAI application into the given runtime path.

        :param root: Destination path for the installation
        :type root: str
        :param version: InvokeAI version to install
        :type version: str
        :param yes_to_all: Accept defaults to all questions
        :type yes_to_all: bool
        :param find_links: A local directory to search for requirement wheels before going to remote indexes
        :type find_links: Path
        """

        import messages

        messages.welcome()

        # With --yes-to-all, take `root` as-is; otherwise run the interactive
        # destination-selection flow.
        self.dest = Path(root).expanduser().resolve() if yes_to_all else messages.dest_path(root)

        # create the venv for the app
        self.venv = self.app_venv()

        self.instance = InvokeAiInstance(runtime=self.dest, venv=self.venv, version=version)

        # install dependencies and the InvokeAI application
        (extra_index_url, optional_modules) = get_torch_source() if not yes_to_all else (None, None)
        self.instance.install(
            extra_index_url,
            optional_modules,
            find_links,
        )

        # install the launch/update scripts into the runtime directory
        self.instance.install_user_scripts()

        # run through the configuration flow
        self.instance.configure()
||||||
|
class InvokeAiInstance:
    """
    Manages an installed instance of InvokeAI, comprising a virtual environment and a runtime directory.
    The virtual environment *may* reside within the runtime directory.
    A single runtime directory *may* be shared by multiple virtual environments, though this isn't currently tested or supported.
    """

    def __init__(self, runtime: Path, venv: Path, version: str) -> None:

        self.runtime = runtime
        self.venv = venv
        self.pip = get_pip_from_venv(venv)
        self.version = version

        # Make the app venv's site-packages importable in this process, and
        # advertise the runtime/venv locations to child processes.
        set_sys_path(venv)
        os.environ["INVOKEAI_ROOT"] = str(self.runtime.expanduser().resolve())
        os.environ["VIRTUAL_ENV"] = str(self.venv.expanduser().resolve())

    def get(self) -> tuple[Path, Path]:
        """
        Get the location of the virtualenv directory for this installation

        :return: Paths of the runtime and the venv directory
        :rtype: tuple[Path, Path]
        """

        return (self.runtime, self.venv)

    def install(self, extra_index_url=None, optional_modules=None, find_links=None):
        """
        Install this instance, including dependencies and the app itself

        :param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
        :type extra_index_url: str
        :param optional_modules: optional extras in "[module1,module2]" format, or None
        :param find_links: local directory of wheels to search before remote indexes, or None
        """

        import messages

        # install torch first to ensure the correct version gets installed.
        # works with either source or wheel install with negligible impact on installation times.
        messages.simple_banner("Installing PyTorch :fire:")
        self.install_torch(extra_index_url, find_links)

        messages.simple_banner("Installing the InvokeAI Application :art:")
        self.install_app(extra_index_url, optional_modules, find_links)

    def install_torch(self, extra_index_url=None, find_links=None):
        """
        Install PyTorch (torch + torchvision) into the app venv via pip.

        :param extra_index_url: extra pip index URL (e.g. a pytorch.org wheel index), or None
        :param find_links: local directory of wheels to search first, or None
        """

        from plumbum import FG, local

        pip = local[self.pip]

        # Conditional entries evaluate to None when the flag is unused;
        # NOTE(review): this relies on plumbum tolerating None arguments in
        # the command list — confirm, or filter Nones explicitly.
        (
            pip[
                "install",
                "--require-virtualenv",
                "torch",
                "torchvision",
                "--force-reinstall",
                "--find-links" if find_links is not None else None,
                find_links,
                "--extra-index-url" if extra_index_url is not None else None,
                extra_index_url,
            ]
            & FG
        )

    def install_app(self, extra_index_url=None, optional_modules=None, find_links=None):
        """
        Install the application with pip.
        Supports installation from PyPi or from a local source directory.

        :param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
        :type extra_index_url: str

        :param optional_modules: optional modules to install using "[module1,module2]" format.
        :type optional_modules: str

        :param find_links: path to a directory containing wheels to be searched prior to going to the internet
        :type find_links: Path
        """

        ## this only applies to pypi installs; TODO actually use this
        if self.version == "pre":
            version = None
            pre = "--pre"
        else:
            version = self.version
            pre = None

        ## TODO: only local wheel will be installed as of now; support for --version arg is TODO
        if FF_USE_LOCAL_WHEEL:
            # if no wheel, try to do a source install before giving up
            try:
                src = str(next(Path(__file__).parent.glob("InvokeAI-*.whl")))
            except StopIteration:
                try:
                    src = Path(__file__).parents[1].expanduser().resolve()
                    # if the above directory contains one of these files, we'll do a source install
                    next(src.glob("pyproject.toml"))
                    next(src.glob("invokeai"))
                except StopIteration:
                    # NOTE(review): execution falls through after this message,
                    # leaving `src` unbound only if the *first* glob inside this
                    # inner try also raised before assignment; the pip call
                    # below then still runs. Consider aborting here.
                    print("Unable to find a wheel or perform a source install. Giving up.")

        elif version == "source":
            # this makes an assumption about the location of the installer package in the source tree
            src = Path(__file__).parents[1].expanduser().resolve()
        else:
            # will install from PyPi
            src = f"invokeai=={version}" if version is not None else "invokeai"

        from plumbum import FG, local

        pip = local[self.pip]

        # Append the optional extras (e.g. "[xformers]") directly to the
        # requirement string; None entries are the unused optional flags.
        (
            pip[
                "install",
                "--require-virtualenv",
                "--use-pep517",
                str(src)+(optional_modules if optional_modules else ''),
                "--find-links" if find_links is not None else None,
                find_links,
                "--extra-index-url" if extra_index_url is not None else None,
                extra_index_url,
                pre,
            ]
            & FG
        )

    def configure(self):
        """
        Configure the InvokeAI runtime directory
        """

        # set sys.argv to a consistent state: keep only -r/--root (with its
        # value) and the yes-to-all flags, dropping everything else.
        new_argv = [sys.argv[0]]
        for i in range(1,len(sys.argv)):
            el = sys.argv[i]
            if el in ['-r','--root']:
                new_argv.append(el)
                # NOTE(review): assumes a value follows the flag; a trailing
                # bare "-r" would raise IndexError here.
                new_argv.append(sys.argv[i+1])
            elif el in ['-y','--yes','--yes-to-all']:
                new_argv.append(el)
        sys.argv = new_argv

        import requests  # to catch download exceptions
        from messages import introduction

        introduction()

        from invokeai.frontend.install import invokeai_configure

        # NOTE: currently the config script does its own arg parsing! this means the command-line switches
        # from the installer will also automatically propagate down to the config script.
        # this may change in the future with config refactoring!
        succeeded = False
        try:
            invokeai_configure()
            succeeded = True
        except requests.exceptions.ConnectionError as e:
            print(f'\nA network error was encountered during configuration and download: {str(e)}')
        except OSError as e:
            print(f'\nAn OS error was encountered during configuration and download: {str(e)}')
        except Exception as e:
            print(f'\nA problem was encountered during the configuration and download steps: {str(e)}')
        finally:
            if not succeeded:
                print('To try again, find the "invokeai" directory, run the script "invoke.sh" or "invoke.bat"')
                print('and choose option 7 to fix a broken install, optionally followed by option 5 to install models.')
                print('Alternatively you can relaunch the installer.')

    def install_user_scripts(self):
        """
        Copy the launch and update scripts to the runtime dir
        """

        ext = "bat" if OS == "Windows" else "sh"

        #scripts = ['invoke', 'update']
        scripts = ['invoke']

        for script in scripts:
            # templates live one level above this file; ".in" files are the
            # script templates shipped with the installer
            src = Path(__file__).parent / '..' / "templates" / f"{script}.{ext}.in"
            dest = self.runtime / f"{script}.{ext}"
            shutil.copy(src, dest)
            # make the installed script executable
            os.chmod(dest, 0o0755)

    def update(self):
        # TODO: not yet implemented
        pass

    def remove(self):
        # TODO: not yet implemented
        pass
||||||
|
|
||||||
|
### Utility functions ###
|
||||||
|
|
||||||
|
|
||||||
|
def get_pip_from_venv(venv_path: Path) -> str:
    """
    Given a path to a virtual environment, get the absolute path to the `pip` executable
    in a cross-platform fashion. Does not validate that the pip executable
    actually exists in the virtualenv.

    :param venv_path: Path to the virtual environment
    :type venv_path: Path
    :return: Absolute path to the pip executable
    :rtype: str
    """

    # Raw string: "\p" in "Scripts\pip.exe" is an invalid escape sequence
    # (DeprecationWarning today, a SyntaxError in future Python versions).
    pip = r"Scripts\pip.exe" if OS == "Windows" else "bin/pip"
    return str(venv_path.expanduser().resolve() / pip)
|
||||||
|
|
||||||
|
def set_sys_path(venv_path: Path) -> None:
    """
    Given a path to a virtual environment, set the sys.path, in a cross-platform fashion,
    such that packages from the given venv may be imported in the current process.
    Ensure that the packages from system environment are not visible (emulate
    the virtual env 'activate' script) - this doesn't work on Windows yet.

    :param venv_path: Path to the virtual environment
    :type venv_path: Path
    """

    # Drop any system- or user-wide package directories from sys.path, but
    # keep the temporary bootstrap virtualenv: it holds the packages we
    # temporarily need at install time.
    sys.path = [
        entry
        for entry in sys.path
        if not entry.endswith("-packages") or BOOTSTRAP_VENV_PREFIX in entry
    ]

    # site-packages lives under "Lib" on Windows and "lib/pythonX.Y" elsewhere
    lib_dir = "Lib" if OS == "Windows" else f"lib/python{sys.version_info.major}.{sys.version_info.minor}"

    # make the venv's site-packages importable from this process
    sys.path.append(str(Path(venv_path, lib_dir, "site-packages").expanduser().resolve()))
|
||||||
|
|
||||||
|
def get_torch_source() -> tuple[Union[str, None], Union[str, None]]:
    """
    Determine the extra index URL for pip to use for torch installation.
    This depends on the OS and the graphics accelerator in use.
    This is only applicable to Windows and Linux, since PyTorch does not
    offer accelerated builds for macOS.

    Prefer CUDA-enabled wheels if the user wasn't sure of their GPU, as it will fallback to CPU if possible.

    A NoneType return means just go to PyPi.

    :return: tuple consisting of (extra index url or None, optional modules to load or None)
    :rtype: tuple
    """

    from messages import graphical_accelerator

    # device can be one of: "cuda", "rocm", "cpu", "idk"
    device = graphical_accelerator()

    url = None
    optional_modules = None
    if OS == "Linux":
        if device == "rocm":
            url = "https://download.pytorch.org/whl/rocm5.2"
        elif device == "cpu":
            url = "https://download.pytorch.org/whl/cpu"

    if device == 'cuda':
        url = 'https://download.pytorch.org/whl/cu117'
        optional_modules = '[xformers]'

    # in all other cases, Torch wheels should be coming from PyPi as of Torch 1.13

    return (url, optional_modules)
|
312
installer/lib/messages.py
Normal file
@ -0,0 +1,312 @@
|
|||||||
|
# Copyright (c) 2023 Eugene Brodsky (https://github.com/ebr)
|
||||||
|
"""
|
||||||
|
Installer user interaction
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import platform
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from prompt_toolkit import prompt
|
||||||
|
from prompt_toolkit.completion import PathCompleter
|
||||||
|
from prompt_toolkit.validation import Validator
|
||||||
|
from rich import box, print
|
||||||
|
from rich.console import Console, Group, group
|
||||||
|
from rich.panel import Panel
|
||||||
|
from rich.prompt import Confirm
|
||||||
|
from rich.style import Style
|
||||||
|
from rich.syntax import Syntax
|
||||||
|
from rich.text import Text
|
||||||
|
|
||||||
|
"""
|
||||||
|
INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/refs/tags/${INVOKEAI_VERSION}.zip
|
||||||
|
INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
|
||||||
|
TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
# Platform identifiers used throughout the interactive flows.
OS = platform.uname().system
ARCH = platform.uname().machine

if OS == "Windows":
    # Windows terminals look better without a background colour
    console = Console(style=Style(color="grey74"))
else:
    console = Console(style=Style(color="grey74", bgcolor="grey19"))
|
||||||
|
def welcome():
    """
    Print the installer's welcome banner, including platform-specific
    guidance (macOS/Windows) when available.
    """

    # Renderable group: optional platform-specific help followed by the
    # generic "be patient" notice.
    @group()
    def text():
        if (platform_specific := _platform_specific_help()) != "":
            yield platform_specific
            yield ""
        yield Text.from_markup("Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with [i]Control-C[/] and retry.", justify="center")

    console.rule()
    print(
        Panel(
            title="[bold wheat1]Welcome to the InvokeAI Installer",
            renderable=text(),
            box=box.DOUBLE,
            expand=True,
            padding=(1, 2),
            style=Style(bgcolor="grey23", color="orange1"),
            subtitle=f"[bold grey39]{OS}-{ARCH}",
        )
    )
    console.line()
|
||||||
|
def confirm_install(dest: Path) -> bool:
    """
    Ask the user to confirm installing into `dest`.

    :param dest: candidate installation directory
    :return: True when the user confirmed this destination
    """
    if not dest.exists():
        print(f"InvokeAI will be installed in {dest}")
        # confirmed unless the user asks to pick somewhere else
        dest_confirmed = not Confirm.ask(f"Would you like to pick a different location?", default=False)
    else:
        # warn before (re)installing over an existing directory
        print(f":exclamation: Directory {dest} already exists :exclamation:")
        dest_confirmed = Confirm.ask(
            ":stop_sign: Are you sure you want to (re)install in this location?",
            default=False,
        )
    console.line()

    return dest_confirmed
|
||||||
|
|
||||||
|
def dest_path(dest=None) -> Path:
    """
    Prompt the user for the destination path and create the path

    :param dest: a filesystem path, defaults to None
    :type dest: str, optional
    :return: absolute path to the created installation directory
    :rtype: Path
    """

    if dest is not None:
        dest = Path(dest).expanduser().resolve()
    else:
        dest = Path.cwd().expanduser().resolve()
    prev_dest = dest.expanduser().resolve()

    dest_confirmed = confirm_install(dest)

    while not dest_confirmed:

        # if the given destination already exists, the starting point for browsing is its parent directory.
        # the user may have made a typo, or otherwise wants to place the root dir next to an existing one.
        # if the destination dir does NOT exist, then the user must have changed their mind about the selection.
        # since we can't read their mind, start browsing at Path.cwd().
        browse_start = (prev_dest.parent if prev_dest.exists() else Path.cwd()).expanduser().resolve()

        path_completer = PathCompleter(
            only_directories=True,
            expanduser=True,
            get_paths=lambda: [browse_start],
            # get_paths=lambda: [".."].extend(list(browse_start.iterdir()))
        )

        console.line()
        print(f"[orange3]Please select the destination directory for the installation:[/] \[{browse_start}]: ")
        selected = prompt(
            f">>> ",
            complete_in_thread=True,
            completer=path_completer,
            default=str(browse_start) + os.sep,
            vi_mode=True,
            complete_while_typing=True
            # Test that this is not needed on Windows
            # complete_style=CompleteStyle.READLINE_LIKE,
        )
        prev_dest = dest
        dest = Path(selected)
        console.line()

        dest_confirmed = confirm_install(dest.expanduser().resolve())

        if not dest_confirmed:
            dest = prev_dest

    dest = dest.expanduser().resolve()

    try:
        dest.mkdir(exist_ok=True, parents=True)
        return dest
    except PermissionError as exc:
        # NOTE(review): rich's top-level `print` may not accept `style=` /
        # `highlight=` keywords — confirm this path against the rich API.
        print(
            f"Failed to create directory {dest} due to insufficient permissions",
            style=Style(color="red"),
            highlight=True,
        )
    except OSError as exc:
        console.print_exception(exc)

    if Confirm.ask("Would you like to try again?"):
        # BUG FIX: previously this called `dest_path(init_path)` — `init_path`
        # is undefined in this scope (NameError) and the recursive result was
        # discarded. Retry with the current selection and propagate the result.
        return dest_path(dest)
    else:
        console.rule("Goodbye!")
|
||||||
|
|
||||||
|
def graphical_accelerator():
    """
    Prompt the user to select the graphical accelerator in their system
    This does not validate user's choices (yet), but only offers choices
    valid for the platform.
    CUDA is the fallback.
    We may be able to detect the GPU driver by shelling out to `modprobe` or `lspci`,
    but this is not yet supported or reliable. Also, some users may have exotic preferences.

    :return: one of "cuda", "rocm", "cpu", "idk"
    """

    if ARCH == "arm64" and OS != "Darwin":
        print(f"Only CPU acceleration is available on {ARCH} architecture. Proceeding with that.")
        return "cpu"

    # (label shown to the user, value returned to the caller)
    nvidia = (
        "an [gold1 b]NVIDIA[/] GPU (using CUDA™)",
        "cuda",
    )
    amd = (
        "an [gold1 b]AMD[/] GPU (using ROCm™)",
        "rocm",
    )
    cpu = (
        "no compatible GPU, or specifically prefer to use the CPU",
        "cpu",
    )
    idk = (
        "I'm not sure what to choose",
        "idk",
    )

    # BUG FIX: this was an `if`/`if`/`elif` mix — an unrecognized OS left
    # `options` unbound (NameError). Use a consistent elif chain.
    if OS == "Windows":
        options = [nvidia, cpu]
    elif OS == "Linux":
        options = [nvidia, amd, cpu]
    elif OS == "Darwin":
        options = [cpu]
        # future CoreML?
    else:
        # unknown platform: only the CPU path is safe to offer
        options = [cpu]

    if len(options) == 1:
        print(f'Your platform [gold1]{OS}-{ARCH}[/] only supports the "{options[0][1]}" driver. Proceeding with that.')
        return options[0][1]

    # "I don't know" is always added the last option
    options.append(idk)

    # map menu number (as a string) -> (label, value)
    options = {str(i): opt for i, opt in enumerate(options, 1)}

    console.rule(":space_invader: GPU (Graphics Card) selection :space_invader:")
    console.print(
        Panel(
            Group(
                "\n".join(
                    [
                        f"Detected the [gold1]{OS}-{ARCH}[/] platform",
                        "",
                        "See [deep_sky_blue1]https://invoke-ai.github.io/InvokeAI/#system[/] to ensure your system meets the minimum requirements.",
                        "",
                        "[red3]🠶[/] [b]Your GPU drivers must be correctly installed before using InvokeAI![/] [red3]🠴[/]",
                    ]
                ),
                "",
                "Please select the type of GPU installed in your computer.",
                Panel(
                    "\n".join([f"[dark_goldenrod b i]{i}[/] [dark_red]🢒[/]{opt[0]}" for (i, opt) in options.items()]),
                    box=box.MINIMAL,
                ),
            ),
            box=box.MINIMAL,
            padding=(1, 1),
        )
    )
    choice = prompt(
        "Please make your selection: ",
        validator=Validator.from_callable(
            # BUG FIX: error message read "one the above" (typo)
            lambda n: n in options.keys(), error_message="Please select one of the above options"
        ),
    )

    if options[choice][1] == "idk":
        console.print(
            "No problem. We will try to install a version that [i]should[/i] be compatible. :crossed_fingers:"
        )

    return options[choice][1]
|
||||||
|
|
||||||
|
def simple_banner(message: str) -> None:
    """
    A simple banner with a message, defined here for styling consistency

    :param message: The message to display
    :type message: str
    """

    # rendered as a horizontal rule with the message centered in it
    console.rule(message)
|
||||||
|
|
||||||
|
# TODO this does not yet work correctly
def windows_long_paths_registry() -> None:
    """
    Display a message about applying the Windows long paths registry fix

    Reads the bundled WinLongPathsEnabled.reg file (UTF-16LE, the .reg
    export encoding) and shows its contents with syntax highlighting so the
    user can see exactly what change would be applied.
    """

    with open(str(Path(__file__).parent / "WinLongPathsEnabled.reg"), "r", encoding="utf-16le") as code:
        syntax = Syntax(code.read(), line_numbers=True)

    console.print(
        Panel(
            Group(
                "\n".join(
                    [
                        "We will now apply a registry fix to enable long paths on Windows. InvokeAI needs this to function correctly. We are asking your permission to modify the Windows Registry on your behalf.",
                        "",
                        "This is the change that will be applied:",
                        syntax,
                    ]
                )
            ),
            title="Windows Long Paths registry fix",
            box=box.HORIZONTALS,
            padding=(1, 1),
        )
    )
|
||||||
|
|
||||||
|
def introduction() -> None:
    """
    Display a banner when starting configuration of the InvokeAI application
    """

    console.rule()

    console.print(
        Panel(
            title=":art: Configuring InvokeAI :art:",
            renderable=Group(
                "",
                "[b]This script will:",
                "",
                "1. Configure the InvokeAI application directory",
                "2. Help download the Stable Diffusion weight files",
                "   and other large models that are needed for text to image generation",
                "3. Create initial configuration files.",
                "",
                "[i]At any point you may interrupt this program and resume later.",
            ),
        )
    )
    console.line(2)
|
|
||||||
|
def _platform_specific_help() -> "Text | str":
    """
    Return OS-specific pre-install guidance for the welcome banner.

    :return: a rich ``Text`` for Darwin/Windows, or an empty string when
        there is nothing platform-specific to show
    """
    # BUG FIX: the annotation previously claimed `-> str`, but the Darwin and
    # Windows branches return rich `Text` objects (callers compare the result
    # against "" rather than type-checking it).
    if OS == "Darwin":
        text = Text.from_markup("""[b wheat1]macOS Users![/]\n\nPlease be sure you have the [b wheat1]Xcode command-line tools[/] installed before continuing.\nIf not, cancel with [i]Control-C[/] and follow the Xcode install instructions at [deep_sky_blue1]https://www.freecodecamp.org/news/install-xcode-command-line-tools/[/].""")
    elif OS == "Windows":
        text = Text.from_markup("""[b wheat1]Windows Users![/]\n\nBefore you start, please do the following:
  1. Double-click on the file [b wheat1]WinLongPathsEnabled.reg[/] in order to
     enable long path support on your system.
  2. Make sure you have the [b wheat1]Visual C++ core libraries[/] installed. If not, install from
     [deep_sky_blue1]https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170[/]""")
    else:
        text = ""
    return text
@ -1,296 +0,0 @@
|
|||||||
# Copyright (c) 2023 Eugene Brodsky (https://github.com/ebr)
|
|
||||||
"""
|
|
||||||
Installer user interaction
|
|
||||||
"""
|
|
||||||
|
|
||||||
import os
|
|
||||||
import platform
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
from prompt_toolkit import prompt
|
|
||||||
from prompt_toolkit.completion import PathCompleter
|
|
||||||
from prompt_toolkit.shortcuts import CompleteStyle
|
|
||||||
from prompt_toolkit.validation import Validator
|
|
||||||
from rich import box, print
|
|
||||||
from rich.console import Console, Group
|
|
||||||
from rich.panel import Panel
|
|
||||||
from rich.prompt import Confirm
|
|
||||||
from rich.style import Style
|
|
||||||
from rich.syntax import Syntax
|
|
||||||
from rich.text import Text
|
|
||||||
|
|
||||||
"""
|
|
||||||
INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/refs/tags/${INVOKEAI_VERSION}.zip
|
|
||||||
INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
|
|
||||||
TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
OS = platform.uname().system
|
|
||||||
ARCH = platform.uname().machine
|
|
||||||
|
|
||||||
if OS == "Windows":
|
|
||||||
# Windows terminals look better without a background colour
|
|
||||||
console = Console(style=Style(color="grey74"))
|
|
||||||
else:
|
|
||||||
console = Console(style=Style(color="grey74", bgcolor="grey19"))
|
|
||||||
|
|
||||||
|
|
||||||
def welcome():
|
|
||||||
console.rule()
|
|
||||||
print(
|
|
||||||
Panel(
|
|
||||||
title="[bold wheat1]Welcome to the InvokeAI Installer",
|
|
||||||
renderable=Text(
|
|
||||||
"Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with control-C and retry.",
|
|
||||||
justify="center",
|
|
||||||
),
|
|
||||||
box=box.DOUBLE,
|
|
||||||
width=80,
|
|
||||||
expand=False,
|
|
||||||
padding=(1, 2),
|
|
||||||
style=Style(bgcolor="grey23", color="orange1"),
|
|
||||||
subtitle=f"[bold grey39]{OS}-{ARCH}",
|
|
||||||
)
|
|
||||||
)
|
|
||||||
console.line()
|
|
||||||
|
|
||||||
def confirm_install(dest: Path) -> bool:
|
|
||||||
if dest.exists():
|
|
||||||
print(f":exclamation: Directory {dest} already exists :exclamation:")
|
|
||||||
dest_confirmed = Confirm.ask(
|
|
||||||
":stop_sign: Are you sure you want to (re)install in this location?",
|
|
||||||
default=False,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
print(f"InvokeAI will be installed in {dest}")
|
|
||||||
dest_confirmed = not Confirm.ask(f"Would you like to pick a different location?", default=False)
|
|
||||||
console.line()
|
|
||||||
|
|
||||||
return dest_confirmed
|
|
||||||
|
|
||||||
|
|
||||||
def dest_path(dest=None) -> Path:
    """
    Prompt the user for the destination path and create the path

    :param dest: a filesystem path, defaults to None
    :type dest: str, optional
    :return: absolute path to the created installation directory,
        or None if the user declined to retry after a failure
    :rtype: Path
    """

    if dest is not None:
        dest = Path(dest).expanduser().resolve()
    else:
        dest = Path.cwd().expanduser().resolve()
    # remember the previous candidate so the user can back out of a new choice
    prev_dest = dest.expanduser().resolve()

    dest_confirmed = confirm_install(dest)

    while not dest_confirmed:
        # if the given destination already exists, the starting point for browsing is its parent directory.
        # the user may have made a typo, or otherwise wants to place the root dir next to an existing one.
        # if the destination dir does NOT exist, then the user must have changed their mind about the selection.
        # since we can't read their mind, start browsing at Path.cwd().
        browse_start = (prev_dest.parent if prev_dest.exists() else Path.cwd()).expanduser().resolve()

        path_completer = PathCompleter(
            only_directories=True,
            expanduser=True,
            get_paths=lambda: [browse_start],
        )

        console.line()
        print(f"[orange3]Please select the destination directory for the installation:[/] \[{browse_start}]: ")
        selected = prompt(
            ">>> ",
            complete_in_thread=True,
            completer=path_completer,
            default=str(browse_start) + os.sep,
            vi_mode=True,
            complete_while_typing=True,
        )
        prev_dest = dest
        dest = Path(selected)
        console.line()

        dest_confirmed = confirm_install(dest.expanduser().resolve())

        if not dest_confirmed:
            # user rejected the new choice; fall back to the previous candidate
            dest = prev_dest

    dest = dest.expanduser().resolve()

    try:
        dest.mkdir(exist_ok=True, parents=True)
        return dest
    except PermissionError:
        print(
            f"Failed to create directory {dest} due to insufficient permissions",
            style=Style(color="red"),
            highlight=True,
        )
    except OSError as exc:
        console.print_exception(exc)

    if Confirm.ask("Would you like to try again?"):
        # BUG FIX: the original called dest_path(init_path) — `init_path` is
        # undefined here (NameError) — and discarded the recursive result, so
        # the caller received None even when the retry succeeded. Retry with
        # the path the user attempted and propagate the result.
        return dest_path(dest)
    else:
        console.rule("Goodbye!")
        return None
|
|
||||||
|
|
||||||
|
|
||||||
def graphical_accelerator():
    """
    Prompt the user to select the graphical accelerator in their system
    This does not validate user's choices (yet), but only offers choices
    valid for the platform.
    CUDA is the fallback.
    We may be able to detect the GPU driver by shelling out to `modprobe` or `lspci`,
    but this is not yet supported or reliable. Also, some users may have exotic preferences.

    :return: the selected accelerator key: "cuda", "rocm", "cpu" or "idk"
    :rtype: str
    """

    # Non-Apple arm64 (e.g. Linux on ARM) only supports CPU acceleration.
    if ARCH == "arm64" and OS != "Darwin":
        print(f"Only CPU acceleration is available on {ARCH} architecture. Proceeding with that.")
        return "cpu"

    # Each option is a (menu description, returned key) pair.
    nvidia = (
        "an [gold1 b]NVIDIA[/] GPU (using CUDA™)",
        "cuda",
    )
    amd = (
        "an [gold1 b]AMD[/] GPU (using ROCm™)",
        "rocm",
    )
    cpu = (
        "no compatible GPU, or specifically prefer to use the CPU",
        "cpu",
    )
    idk = (
        "I'm not sure what to choose",
        "idk",
    )

    # BUG FIX: the original used two independent `if OS == ...` statements,
    # so on any platform other than Windows/Linux/Darwin `options` was never
    # bound and the code below raised NameError. Use a single if/elif chain
    # with a CPU fallback for unknown platforms.
    if OS == "Windows":
        options = [nvidia, cpu]
    elif OS == "Linux":
        options = [nvidia, amd, cpu]
    elif OS == "Darwin":
        options = [cpu]
        # future CoreML?
    else:
        options = [cpu]

    if len(options) == 1:
        print(f'Your platform [gold1]{OS}-{ARCH}[/] only supports the "{options[0][1]}" driver. Proceeding with that.')
        return options[0][1]

    # "I don't know" is always added the last option
    options.append(idk)

    # Map menu numbers ("1", "2", ...) onto the option tuples for validation.
    options = {str(i): opt for i, opt in enumerate(options, 1)}

    console.rule(":space_invader: GPU (Graphics Card) selection :space_invader:")
    console.print(
        Panel(
            Group(
                "\n".join(
                    [
                        f"Detected the [gold1]{OS}-{ARCH}[/] platform",
                        "",
                        "See [steel_blue3]https://invoke-ai.github.io/InvokeAI/#system[/] to ensure your system meets the minimum requirements.",
                        "",
                        "[red3]🠶[/] [b]Your GPU drivers must be correctly installed before using InvokeAI![/] [red3]🠴[/]",
                    ]
                ),
                "",
                "Please select the type of GPU installed in your computer.",
                Panel(
                    "\n".join([f"[dark_goldenrod b i]{i}[/] [dark_red]🢒[/]{opt[0]}" for (i, opt) in options.items()]),
                    box=box.MINIMAL,
                ),
            ),
            box=box.MINIMAL,
            padding=(1, 1),
        )
    )
    choice = prompt(
        "Please make your selection: ",
        validator=Validator.from_callable(
            # Typo fix in the user-facing message: "one the" -> "one of the".
            lambda n: n in options.keys(), error_message="Please select one of the above options"
        ),
    )

    if options[choice][1] == "idk":
        console.print(
            "No problem. We will try to install a version that [i]should[/i] be compatible. :crossed_fingers:"
        )

    return options[choice][1]
|
|
||||||
|
|
||||||
|
|
||||||
def simple_banner(message: str) -> None:
    """
    A simple banner with a message, defined here for styling consistency

    Renders the message as a horizontal rule on the module-level console.

    :param message: The message to display
    :type message: str
    """

    console.rule(message)
|
|
||||||
|
|
||||||
|
|
||||||
# TODO this does not yet work correctly
|
|
||||||
def windows_long_paths_registry() -> None:
    """
    Display a message about applying the Windows long paths registry fix

    Shows the user the exact .reg change that will be applied, asking
    (implicitly) for permission to modify the registry on their behalf.
    """

    # The .reg file ships next to this script; registry exports are UTF-16LE.
    with open(str(Path(__file__).parent / "WinLongPathsEnabled.reg"), "r", encoding="utf-16le") as code:
        syntax = Syntax(code.read(), line_numbers=True)

    console.print(
        Panel(
            Group(
                "\n".join(
                    [
                        "We will now apply a registry fix to enable long paths on Windows. InvokeAI needs this to function correctly. We are asking your permission to modify the Windows Registry on your behalf.",
                        "",
                        "This is the change that will be applied:",
                    ]
                ),
                # BUG FIX: `syntax` is a rich Syntax renderable, not a str;
                # the original included it in the "\n".join() list, which
                # raises TypeError at runtime. Render it as a separate member
                # of the Group instead.
                syntax,
            ),
            title="Windows Long Paths registry fix",
            box=box.HORIZONTALS,
            padding=(1, 1),
        )
    )
|
|
||||||
|
|
||||||
|
|
||||||
def introduction() -> None:
    """
    Display a banner when starting configuration of the InvokeAI application
    """

    console.rule()

    steps = Group(
        "",
        "[b]This script will:",
        "",
        "1. Configure the InvokeAI application directory",
        "2. Help download the Stable Diffusion weight files",
        "   and other large models that are needed for text to image generation",
        "3. Create initial configuration files.",
        "",
        "[i]At any point you may interrupt this program and resume later.",
    )
    console.print(
        Panel(
            title=":art: Configuring InvokeAI :art:",
            renderable=steps,
        )
    )
    console.line(2)
|
|