Compare commits
1537 commits: temp-v3.0. ... bugfix/han
.gitattributes (1 line changed)

@@ -2,3 +2,4 @@
 # Only affects text files and ignores other file types.
 # For more info see: https://www.aleksandrhovhannisyan.com/blog/crlf-vs-lf-normalizing-line-endings-in-git/
 * text=auto
+docker/** text eol=lf
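The added line forces LF endings for everything under docker/. As a quick sanity check, `git check-attr` reports the attributes a path picks up; the docker/run.sh path below is only illustrative:

```sh
# Query the attributes that .gitattributes applies to a path under docker/
git check-attr text eol -- docker/run.sh
# docker/run.sh: text: set
# docker/run.sh: eol: lf
```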
.github/CODEOWNERS (6 lines changed)

@@ -1,5 +1,5 @@
 # continuous integration
-/.github/workflows/ @lstein @blessedcoolant @hipsterusername
+/.github/workflows/ @lstein @blessedcoolant @hipsterusername @ebr

 # documentation
 /docs/ @lstein @blessedcoolant @hipsterusername @Millu
@@ -10,7 +10,7 @@

 # installation and configuration
 /pyproject.toml @lstein @blessedcoolant @hipsterusername
-/docker/ @lstein @blessedcoolant @hipsterusername
+/docker/ @lstein @blessedcoolant @hipsterusername @ebr
 /scripts/ @ebr @lstein @hipsterusername
 /installer/ @lstein @ebr @hipsterusername
 /invokeai/assets @lstein @ebr @hipsterusername
@@ -30,5 +30,3 @@
 /invokeai/frontend/merge @lstein @blessedcoolant @hipsterusername
 /invokeai/frontend/training @lstein @blessedcoolant @hipsterusername
 /invokeai/frontend/web @psychedelicious @blessedcoolant @maryhipp @hipsterusername
-
-
.github/ISSUE_TEMPLATE/BUG_REPORT.yml (108 lines changed)

@@ -6,10 +6,6 @@ title: '[bug]: '

 labels: ['bug']

-# assignees:
-#   - moderator_bot
-#   - lstein
-
 body:
   - type: markdown
     attributes:
@@ -18,10 +14,9 @@ body:

   - type: checkboxes
     attributes:
-      label: Is there an existing issue for this?
+      label: Is there an existing issue for this problem?
       description: |
-        Please use the [search function](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen+label%3Abug)
-        first to see if an issue already exists for the bug you encountered.
+        Please [search](https://github.com/invoke-ai/InvokeAI/issues) first to see if an issue already exists for the problem.
       options:
         - label: I have searched the existing issues
           required: true
@@ -33,35 +28,45 @@ body:
   - type: dropdown
     id: os_dropdown
     attributes:
-      label: OS
-      description: Which operating System did you use when the bug occured
+      label: Operating system
+      description: Your computer's operating system.
       multiple: false
       options:
         - 'Linux'
         - 'Windows'
         - 'macOS'
+        - 'other'
     validations:
       required: true

   - type: dropdown
     id: gpu_dropdown
     attributes:
-      label: GPU
-      description: Which kind of Graphic-Adapter is your System using
+      label: GPU vendor
+      description: Your GPU's vendor.
       multiple: false
       options:
-        - 'cuda'
-        - 'amd'
-        - 'mps'
-        - 'cpu'
+        - 'Nvidia (CUDA)'
+        - 'AMD (ROCm)'
+        - 'Apple Silicon (MPS)'
+        - 'None (CPU)'
     validations:
       required: true

+  - type: input
+    id: gpu_model
+    attributes:
+      label: GPU model
+      description: Your GPU's model. If on Apple Silicon, this is your Mac's chip. Leave blank if on CPU.
+      placeholder: ex. RTX 2080 Ti, Mac M1 Pro
+    validations:
+      required: false
+
   - type: input
     id: vram
     attributes:
-      label: VRAM
-      description: Size of the VRAM if known
+      label: GPU VRAM
+      description: Your GPU's VRAM. If on Apple Silicon, this is your Mac's unified memory. Leave blank if on CPU.
       placeholder: 8GB
     validations:
       required: false
@@ -69,44 +74,73 @@ body:
   - type: input
     id: version-number
     attributes:
-      label: What version did you experience this issue on?
+      label: Version number
       description: |
-        Please share the version of Invoke AI that you experienced the issue on. If this is not the latest version, please update first to confirm the issue still exists. If you are testing main, please include the commit hash instead.
-      placeholder: X.X.X
+        The version of Invoke you have installed. If it is not the latest version, please update and try again to confirm the issue still exists. If you are testing main, please include the commit hash instead.
+      placeholder: ex. 3.6.1
+    validations:
+      required: true
+
+  - type: input
+    id: browser-version
+    attributes:
+      label: Browser
+      description: Your web browser and version.
+      placeholder: ex. Firefox 123.0b3
     validations:
       required: true

   - type: textarea
-    id: what-happened
+    id: python-deps
     attributes:
-      label: What happened?
+      label: Python dependencies
       description: |
-        Briefly describe what happened, what you expected to happen and how to reproduce this bug.
-      placeholder: When using the webinterface and right-clicking on button X instead of the popup-menu there error Y appears
-    validations:
-      required: true
-
-  - type: textarea
-    attributes:
-      label: Screenshots
-      description: If applicable, add screenshots to help explain your problem
-      placeholder: this is what the result looked like <screenshot>
+        If the problem occurred during image generation, click the gear icon at the bottom left corner, click "About", click the copy button and then paste here.
     validations:
       required: false

   - type: textarea
+    id: what-happened
+    attributes:
+      label: What happened
+      description: |
+        Describe what happened. Include any relevant error messages, stack traces and screenshots here.
+      placeholder: I clicked button X and then Y happened.
+    validations:
+      required: true
+
+  - type: textarea
+    id: what-you-expected
+    attributes:
+      label: What you expected to happen
+      description: Describe what you expected to happen.
+      placeholder: I expected Z to happen.
+    validations:
+      required: true
+
+  - type: textarea
+    id: how-to-repro
+    attributes:
+      label: How to reproduce the problem
+      description: List steps to reproduce the problem.
+      placeholder: Start the app, generate an image with these settings, then click button X.
+    validations:
+      required: false
+
+  - type: textarea
+    id: additional-context
     attributes:
       label: Additional context
-      description: Add any other context about the problem here
+      description: Any other context that might help us to understand the problem.
       placeholder: Only happens when there is full moon and Friday the 13th on Christmas Eve 🎅🏻
     validations:
       required: false

   - type: input
-    id: contact
+    id: discord-username
     attributes:
-      label: Contact Details
-      description: __OPTIONAL__ How can we get in touch with you if we need more info (besides this issue)?
-      placeholder: ex. email@example.com, discordname, twitter, ...
+      label: Discord username
+      description: If you are on the Invoke discord and would prefer to be contacted there, please provide your username.
+      placeholder: supercoolusername123
    validations:
      required: false
.github/pr_labels.yml (new file, 59 lines)

@@ -0,0 +1,59 @@
+Root:
+  - changed-files:
+      - any-glob-to-any-file: '*'
+
+PythonDeps:
+  - changed-files:
+      - any-glob-to-any-file: 'pyproject.toml'
+
+Python:
+  - changed-files:
+      - all-globs-to-any-file:
+          - 'invokeai/**'
+          - '!invokeai/frontend/web/**'
+
+PythonTests:
+  - changed-files:
+      - any-glob-to-any-file: 'tests/**'
+
+CICD:
+  - changed-files:
+      - any-glob-to-any-file: .github/**
+
+Docker:
+  - changed-files:
+      - any-glob-to-any-file: docker/**
+
+Installer:
+  - changed-files:
+      - any-glob-to-any-file: installer/**
+
+Documentation:
+  - changed-files:
+      - any-glob-to-any-file: docs/**
+
+Invocations:
+  - changed-files:
+      - any-glob-to-any-file: 'invokeai/app/invocations/**'
+
+Backend:
+  - changed-files:
+      - any-glob-to-any-file: 'invokeai/backend/**'
+
+Api:
+  - changed-files:
+      - any-glob-to-any-file: 'invokeai/app/api/**'
+
+Services:
+  - changed-files:
+      - any-glob-to-any-file: 'invokeai/app/services/**'
+
+FrontendDeps:
+  - changed-files:
+      - any-glob-to-any-file:
+          - '**/*/package.json'
+          - '**/*/pnpm-lock.yaml'
+
+Frontend:
+  - changed-files:
+      - any-glob-to-any-file: 'invokeai/frontend/web/**'
.github/pull_request_template.md (15 lines changed)

@@ -42,6 +42,21 @@ Please provide steps on how to test changes, any hardware or
 software specifications as well as any other pertinent information.
 -->

+## Merge Plan
+
+<!--
+A merge plan describes how this PR should be handled after it is approved.
+
+Example merge plans:
+- "This PR can be merged when approved"
+- "This must be squash-merged when approved"
+- "DO NOT MERGE - I will rebase and tidy commits before merging"
+- "#dev-chat on discord needs to be advised of this change when it is merged"
+
+A merge plan is particularly important for large PRs or PRs that touch the
+database in any way.
+-->
+
 ## Added/updated tests?

 - [ ] Yes
.github/workflows/build-container.yml (5 lines changed)

@@ -40,10 +40,14 @@ jobs:
       - name: Free up more disk space on the runner
         # https://github.com/actions/runner-images/issues/2840#issuecomment-1284059930
         run: |
+          echo "----- Free space before cleanup"
+          df -h
           sudo rm -rf /usr/share/dotnet
           sudo rm -rf "$AGENT_TOOLSDIRECTORY"
           sudo swapoff /mnt/swapfile
           sudo rm -rf /mnt/swapfile
+          echo "----- Free space after cleanup"
+          df -h

       - name: Checkout
         uses: actions/checkout@v3
@@ -91,6 +95,7 @@ jobs:
       #   password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Build container
+        timeout-minutes: 40
         id: docker_build
         uses: docker/build-push-action@v4
         with:
.github/workflows/label-pr.yml (new file, 16 lines)

@@ -0,0 +1,16 @@
+name: "Pull Request Labeler"
+on:
+  - pull_request_target
+
+jobs:
+  labeler:
+    permissions:
+      contents: read
+      pull-requests: write
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - uses: actions/labeler@v5
+        with:
+          configuration-path: .github/pr_labels.yml
.github/workflows/lint-frontend.yml (24 lines changed)

@@ -22,12 +22,22 @@ jobs:
     runs-on: ubuntu-22.04
     steps:
       - name: Setup Node 18
-        uses: actions/setup-node@v3
+        uses: actions/setup-node@v4
         with:
           node-version: '18'
-      - uses: actions/checkout@v3
-      - run: 'yarn install --frozen-lockfile'
-      - run: 'yarn run lint:tsc'
-      - run: 'yarn run lint:madge'
-      - run: 'yarn run lint:eslint'
-      - run: 'yarn run lint:prettier'
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Setup pnpm
+        uses: pnpm/action-setup@v2
+        with:
+          version: '8.12.1'
+      - name: Install dependencies
+        run: 'pnpm install --prefer-frozen-lockfile'
+      - name: Typescript
+        run: 'pnpm run lint:tsc'
+      - name: Madge
+        run: 'pnpm run lint:madge'
+      - name: ESLint
+        run: 'pnpm run lint:eslint'
+      - name: Prettier
+        run: 'pnpm run lint:prettier'
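The same four checks can be reproduced locally; a minimal sketch, assuming the frontend package lives in invokeai/frontend/web (the working directory used elsewhere in this compare) and defines these lint scripts:

```sh
cd invokeai/frontend/web
pnpm install --prefer-frozen-lockfile
pnpm run lint:tsc       # TypeScript type check
pnpm run lint:madge     # circular-import check
pnpm run lint:eslint    # ESLint
pnpm run lint:prettier  # Prettier formatting check
```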
.github/workflows/pyflakes.yml (deleted, 20 lines)

@@ -1,20 +0,0 @@
-on:
-  pull_request:
-  push:
-    branches:
-      - main
-      - development
-      - 'release-candidate-*'
-
-jobs:
-  pyflakes:
-    name: runner / pyflakes
-    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - name: pyflakes
-        uses: reviewdog/action-pyflakes@v1
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          reporter: github-pr-review
.github/workflows/pypi-release.yml (52 lines changed)

@@ -1,13 +1,15 @@
 name: PyPI Release

 on:
-  push:
-    paths:
-      - 'invokeai/version/invokeai_version.py'
   workflow_dispatch:
+    inputs:
+      publish_package:
+        description: 'Publish build on PyPi? [true/false]'
+        required: true
+        default: 'false'

 jobs:
-  release:
+  build-and-release:
     if: github.repository == 'invoke-ai/InvokeAI'
     runs-on: ubuntu-22.04
     env:
@@ -15,20 +17,44 @@ jobs:
       TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
       TWINE_NON_INTERACTIVE: 1
     steps:
-      - name: checkout sources
-        uses: actions/checkout@v3
+      - name: Checkout
+        uses: actions/checkout@v4

-      - name: install deps
+      - name: Setup Node 18
+        uses: actions/setup-node@v4
+        with:
+          node-version: '18'
+
+      - name: Setup pnpm
+        uses: pnpm/action-setup@v2
+        with:
+          version: '8.12.1'
+
+      - name: Install frontend dependencies
+        run: pnpm install --prefer-frozen-lockfile
+        working-directory: invokeai/frontend/web
+
+      - name: Build frontend
+        run: pnpm run build
+        working-directory: invokeai/frontend/web
+
+      - name: Install python dependencies
         run: pip install --upgrade build twine

-      - name: build package
+      - name: Build python package
         run: python3 -m build

-      - name: check distribution
+      - name: Upload build as workflow artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: dist
+          path: dist
+
+      - name: Check distribution
         run: twine check dist/*

-      - name: check PyPI versions
-        if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/v2.3'
+      - name: Check PyPI versions
+        if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release/')
         run: |
           pip install --upgrade requests
           python -c "\
@@ -36,6 +62,6 @@ jobs:
           EXISTS=scripts.pypi_helper.local_on_pypi(); \
           print(f'PACKAGE_EXISTS={EXISTS}')" >> $GITHUB_ENV

-      - name: upload package
-        if: env.PACKAGE_EXISTS == 'False' && env.TWINE_PASSWORD != ''
+      - name: Publish build on PyPi
+        if: env.PACKAGE_EXISTS == 'False' && env.TWINE_PASSWORD != '' && github.event.inputs.publish_package == 'true'
         run: twine upload dist/*
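With the push trigger gone, releases now happen only on manual dispatch, and the upload step fires only when the version is not already on PyPI, a token is configured, and the publish_package input is 'true'. A sketch of dispatching the workflow with the GitHub CLI, assuming gh is authenticated against this repository:

```sh
# build, check and publish
gh workflow run pypi-release.yml -f publish_package=true

# dry run: build and check only, skip the upload
gh workflow run pypi-release.yml -f publish_package=false
```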
.github/workflows/style-checks.yml (9 lines changed)

@@ -6,7 +6,7 @@ on:
     branches: main

 jobs:
-  black:
+  ruff:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
@@ -18,8 +18,7 @@ jobs:

       - name: Install dependencies with pip
         run: |
-          pip install black flake8 Flake8-pyproject isort
+          pip install ruff

-      - run: isort --check-only .
-      - run: black --check .
-      - run: flake8
+      - run: ruff check --output-format=github .
+      - run: ruff format --check .
.github/workflows/test-invoke-pip.yml (2 lines changed)

@@ -58,7 +58,7 @@ jobs:

       - name: Check for changed python files
         id: changed-files
-        uses: tj-actions/changed-files@v37
+        uses: tj-actions/changed-files@v41
         with:
           files_yaml: |
             python:
.gitignore (15 lines changed)

@@ -1,8 +1,5 @@
 .idea/

-# ignore the Anaconda/Miniconda installer used while building Docker image
-anaconda.sh
-
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
@@ -19,7 +16,7 @@ __pycache__/
 .Python
 build/
 develop-eggs/
-# dist/
+dist/
 downloads/
 eggs/
 .eggs/
@@ -136,12 +133,10 @@ celerybeat.pid

 # Environments
 .env
-.venv
+.venv*
 env/
 venv/
 ENV/
-env.bak/
-venv.bak/

 # Spyder project settings
 .spyderproject
@@ -186,14 +181,10 @@ cython_debug/
 .scratch/
 .vscode/

-# ignore environment.yml and requirements.txt
-# these are links to the real files in environments-and-requirements
-environment.yml
-requirements.txt
-
 # source installer files
 installer/*zip
 installer/install.bat
 installer/install.sh
 installer/update.bat
 installer/update.sh
+installer/InvokeAI-Installer/
Makefile (new file, 52 lines)

@@ -0,0 +1,52 @@
+# simple Makefile with scripts that are otherwise hard to remember
+# to use, run from the repo root `make <command>`
+
+default: help
+
+help:
+	@echo Developer commands:
+	@echo
+	@echo "ruff           Run ruff, fixing any safely-fixable errors and formatting"
+	@echo "ruff-unsafe    Run ruff, fixing all fixable errors and formatting"
+	@echo "mypy           Run mypy using the config in pyproject.toml to identify type mismatches and other coding errors"
+	@echo "mypy-all       Run mypy ignoring the config in pyproject.toml but still ignoring missing imports"
+	@echo "frontend-build Build the frontend in order to run on localhost:9090"
+	@echo "frontend-dev   Run the frontend in developer mode on localhost:5173"
+	@echo "installer-zip  Build the installer .zip file for the current version"
+	@echo "tag-release    Tag the GitHub repository with the current version (use at release time only!)"
+
+# Runs ruff, fixing any safely-fixable errors and formatting
+ruff:
+	ruff check . --fix
+	ruff format .
+
+# Runs ruff, fixing all errors it can fix and formatting
+ruff-unsafe:
+	ruff check . --fix --unsafe-fixes
+	ruff format .
+
+# Runs mypy, using the config in pyproject.toml
+mypy:
+	mypy scripts/invokeai-web.py
+
+# Runs mypy, ignoring the config in pyproject.toml but still ignoring missing (untyped) imports
+# (many files are ignored by the config, so this is useful for checking all files)
+mypy-all:
+	mypy scripts/invokeai-web.py --config-file= --ignore-missing-imports
+
+# Build the frontend
+frontend-build:
+	cd invokeai/frontend/web && pnpm build
+
+# Run the frontend in dev mode
+frontend-dev:
+	cd invokeai/frontend/web && pnpm dev
+
+# Installer zip file
+installer-zip:
+	cd installer && ./create_installer.sh
+
+# Tag the release
+tag-release:
+	cd installer && ./tag_release.sh
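As the header comment says, the targets are invoked from the repository root, for example:

```sh
make help          # list the available developer commands
make ruff          # lint and format, applying only safe fixes
make frontend-dev  # serve the web UI in dev mode on localhost:5173
```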
README.md (28 lines changed)

@@ -1,10 +1,10 @@
 <div align="center">

-
+

-# Invoke AI - Generative AI for Professional Creatives
-## Professional Creative Tools for Stable Diffusion, Custom-Trained Models, and more.
-To learn more about Invoke AI, get started instantly, or implement our Business solutions, visit [invoke.ai](https://invoke.ai)
+# Invoke - Professional Creative AI Tools for Visual Media
+## To learn more about Invoke, or implement our Business solutions, visit [invoke.com](https://www.invoke.com/about)
+


 [![discord badge]][discord link]
@@ -56,7 +56,9 @@ the foundation for multiple commercial products.

 <div align="center">

-
+
+

 </div>
@@ -123,10 +125,10 @@ and go to http://localhost:9090.

 ### Command-Line Installation (for developers and users familiar with Terminals)

-You must have Python 3.9 through 3.11 installed on your machine. Earlier or
+You must have Python 3.10 through 3.11 installed on your machine. Earlier or
 later versions are not supported.
-Node.js also needs to be installed along with yarn (can be installed with
-the command `npm install -g yarn` if needed)
+Node.js also needs to be installed along with `pnpm` (can be installed with
+the command `npm install -g pnpm` if needed)

 1. Open a command-line window on your machine. The PowerShell is recommended for Windows.
 2. Create a directory to install InvokeAI into. You'll need at least 15 GB of free space:
@@ -161,13 +163,13 @@ the command `npm install -g yarn` if needed)
    _For Windows/Linux with an NVIDIA GPU:_

    ```terminal
-   pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu118
+   pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
    ```

    _For Linux with an AMD GPU:_

    ```sh
-   pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
+   pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6
    ```

    _For non-GPU systems:_
@@ -175,7 +177,7 @@ the command `npm install -g yarn` if needed)
    pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
    ```

-   _For Macintoshes, either Intel or M1/M2:_
+   _For Macintoshes, either Intel or M1/M2/M3:_

    ```sh
    pip install InvokeAI --use-pep517
@@ -270,7 +272,7 @@ upgrade script.** See the next section for a Windows recipe.
 3. Select option [1] to upgrade to the latest release.

 4. Once the upgrade is finished you will be returned to the launcher
-   menu. Select option [7] "Re-run the configure script to fix a broken
+   menu. Select option [6] "Re-run the configure script to fix a broken
    install or to complete a major upgrade".

    This will run the configure script against the v2.3 directory and
@@ -395,7 +397,7 @@ Notes](https://github.com/invoke-ai/InvokeAI/releases) and the

 ### Troubleshooting

-Please check out our **[Q&A](https://invoke-ai.github.io/InvokeAI/help/TROUBLESHOOT/#faq)** to get solutions for common installation
+Please check out our **[Troubleshooting Guide](https://invoke-ai.github.io/InvokeAI/installation/010_INSTALL_AUTOMATED/#troubleshooting)** to get solutions for common installation
 problems and other issues. For more help, please join our [Discord][discord link]

 ## Contributing
docker/.env.sample

@@ -1,13 +1,18 @@
 ## Make a copy of this file named `.env` and fill in the values below.
-## Any environment variables supported by InvokeAI can be specified here.
+## Any environment variables supported by InvokeAI can be specified here,
+## in addition to the examples below.

-# INVOKEAI_ROOT is the path to a path on the local filesystem where InvokeAI will store data.
+# HOST_INVOKEAI_ROOT is the path on the docker host's filesystem where InvokeAI will store data.
 # Outputs will also be stored here by default.
-# This **must** be an absolute path.
-INVOKEAI_ROOT=
+# If relative, it will be relative to the docker directory in which the docker-compose.yml file is located
+#HOST_INVOKEAI_ROOT=../../invokeai-data

-HUGGINGFACE_TOKEN=
+# INVOKEAI_ROOT is the path to the root of the InvokeAI repository within the container.
+# INVOKEAI_ROOT=~/invokeai

-## optional variables specific to the docker setup
-# GPU_DRIVER=cuda
+# Get this value from your HuggingFace account settings page.
+# HUGGING_FACE_HUB_TOKEN=
+
+## optional variables specific to the docker setup.
+# GPU_DRIVER=nvidia #| rocm
 # CONTAINER_UID=1000
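A minimal sketch of putting the sample to use, assuming it ships as docker/.env.sample alongside the docker-compose.yml it refers to:

```sh
cd docker
cp .env.sample .env                 # then edit the values you need
echo "GPU_DRIVER=nvidia" >> .env    # or rocm, per the comment above
docker compose up
```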
docker/Dockerfile

@@ -2,7 +2,7 @@
 ## Builder stage

-FROM library/ubuntu:22.04 AS builder
+FROM library/ubuntu:23.04 AS builder

 ARG DEBIAN_FRONTEND=noninteractive
 RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache

@@ -10,7 +10,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
     --mount=type=cache,target=/var/lib/apt,sharing=locked \
     apt update && apt-get install -y \
         git \
-        python3.10-venv \
+        python3-venv \
         python3-pip \
         build-essential

@@ -18,8 +18,8 @@ ENV INVOKEAI_SRC=/opt/invokeai
 ENV VIRTUAL_ENV=/opt/venv/invokeai

 ENV PATH="$VIRTUAL_ENV/bin:$PATH"
-ARG TORCH_VERSION=2.0.1
-ARG TORCHVISION_VERSION=0.15.2
+ARG TORCH_VERSION=2.1.2
+ARG TORCHVISION_VERSION=0.16.2
 ARG GPU_DRIVER=cuda
 ARG TARGETPLATFORM="linux/amd64"
 # unused but available

@@ -35,9 +35,9 @@ RUN --mount=type=cache,target=/root/.cache/pip \
     if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
         extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
     elif [ "$GPU_DRIVER" = "rocm" ]; then \
-        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.4.2"; \
+        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm5.6"; \
     else \
-        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu118"; \
+        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu121"; \
     fi &&\
     pip install $extra_index_url_arg \
         torch==$TORCH_VERSION \
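The branch above just swaps in the PyTorch wheel index that matches the chosen accelerator. Expanded by hand with the versions pinned earlier in this Dockerfile, the three cases behave roughly like this sketch (not taken verbatim from the build):

```bash
# CPU or linux/arm64
pip install --extra-index-url https://download.pytorch.org/whl/cpu torch==2.1.2 torchvision==0.16.2
# AMD ROCm 5.6
pip install --extra-index-url https://download.pytorch.org/whl/rocm5.6 torch==2.1.2 torchvision==0.16.2
# NVIDIA CUDA 12.1 (the default)
pip install --extra-index-url https://download.pytorch.org/whl/cu121 torch==2.1.2 torchvision==0.16.2
```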
@@ -54,23 +54,25 @@ RUN --mount=type=cache,target=/root/.cache/pip \
     if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
         pip install -e ".[xformers]"; \
     else \
-        pip install -e "."; \
+        pip install $extra_index_url_arg -e "."; \
     fi

 # #### Build the Web UI ------------------------------------

-FROM node:18 AS web-builder
+FROM node:20-slim AS web-builder
+ENV PNPM_HOME="/pnpm"
+ENV PATH="$PNPM_HOME:$PATH"
+RUN corepack enable

 WORKDIR /build
 COPY invokeai/frontend/web/ ./
-RUN --mount=type=cache,target=/usr/lib/node_modules \
-    npm install --include dev
-RUN --mount=type=cache,target=/usr/lib/node_modules \
-    yarn vite build
+RUN --mount=type=cache,target=/pnpm/store \
+    pnpm install --frozen-lockfile
+RUN npx vite build

 #### Runtime stage ---------------------------------------

-FROM library/ubuntu:22.04 AS runtime
+FROM library/ubuntu:23.04 AS runtime

 ARG DEBIAN_FRONTEND=noninteractive
 ENV PYTHONUNBUFFERED=1

@@ -85,6 +87,7 @@ RUN apt update && apt install -y --no-install-recommends \
     iotop \
     bzip2 \
     gosu \
+    magic-wormhole \
     libglib2.0-0 \
     libgl1-mesa-glx \
     python3-venv \

@@ -94,15 +97,13 @@ RUN apt update && apt install -y --no-install-recommends \
     libstdc++-10-dev &&\
     apt-get clean && apt-get autoclean

-# globally add magic-wormhole
-# for ease of transferring data to and from the container
-# when running in sandboxed cloud environments; e.g. Runpod etc.
-RUN pip install magic-wormhole

 ENV INVOKEAI_SRC=/opt/invokeai
 ENV VIRTUAL_ENV=/opt/venv/invokeai
 ENV INVOKEAI_ROOT=/invokeai
 ENV PATH="$VIRTUAL_ENV/bin:$INVOKEAI_SRC:$PATH"
+ENV CONTAINER_UID=${CONTAINER_UID:-1000}
+ENV CONTAINER_GID=${CONTAINER_GID:-1000}

 # --link requires buildkit w/ dockerfile syntax 1.4
 COPY --link --from=builder ${INVOKEAI_SRC} ${INVOKEAI_SRC}

@@ -120,9 +121,7 @@ WORKDIR ${INVOKEAI_SRC}
 RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
 RUN python3 -c "from patchmatch import patch_match"

-# Create unprivileged user and make the local dir
-RUN useradd --create-home --shell /bin/bash -u 1000 --comment "container local user" invoke
-RUN mkdir -p ${INVOKEAI_ROOT} && chown -R invoke:invoke ${INVOKEAI_ROOT}
+RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}

 COPY docker/docker-entrypoint.sh ./
 ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
docker/README.md

@@ -1,11 +1,19 @@
 # InvokeAI Containerized

-All commands are to be run from the `docker` directory: `cd docker`
+All commands should be run within the `docker` directory: `cd docker`

+## Quickstart :rocket:
+
+On a known working Linux+Docker+CUDA (Nvidia) system, execute `./run.sh` in this directory. It will take a few minutes - depending on your internet speed - to install the core models. Once the application starts up, open `http://localhost:9090` in your browser to Invoke!
+
+For more configuration options (using an AMD GPU, custom root directory location, etc): read on.
+
+## Detailed setup
+
 #### Linux

 1. Ensure buildkit is enabled in the Docker daemon settings (`/etc/docker/daemon.json`)
-2. Install the `docker compose` plugin using your package manager, or follow a [tutorial](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-compose-on-ubuntu-22-04).
+2. Install the `docker compose` plugin using your package manager, or follow a [tutorial](https://docs.docker.com/compose/install/linux/#install-using-the-repository).
    - The deprecated `docker-compose` (hyphenated) CLI continues to work for now.
 3. Ensure the docker daemon is able to access the GPU.
    - You may need to install [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)

@@ -18,13 +26,12 @@ All commands are to be run from the `docker` directory: `cd docker`

    This is done via Docker Desktop preferences

-## Quickstart
+### Configure Invoke environment

-1. Make a copy of `env.sample` and name it `.env` (`cp env.sample .env` (Mac/Linux) or `copy example.env .env` (Windows)). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to:
+1. Make a copy of `.env.sample` and name it `.env` (`cp .env.sample .env` (Mac/Linux) or `copy example.env .env` (Windows)). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to:
    a. the desired location of the InvokeAI runtime directory, or
    b. an existing, v3.0.0 compatible runtime directory.
-1. `docker compose up`
+1. Execute `run.sh`

 The image will be built automatically if needed.

@@ -38,24 +45,28 @@ The runtime directory (holding models and outputs) will be created in the locati

 The Docker daemon on the system must be already set up to use the GPU. In case of Linux, this involves installing `nvidia-docker-runtime` and configuring the `nvidia` runtime as default. Steps will be different for AMD. Please see Docker documentation for the most up-to-date instructions for using your GPU with Docker.

+To use an AMD GPU, set `GPU_DRIVER=rocm` in your `.env` file.
+
 ## Customize

-Check the `.env.sample` file. It contains some environment variables for running in Docker. Copy it, name it `.env`, and fill it in with your own values. Next time you run `docker compose up`, your custom values will be used.
+Check the `.env.sample` file. It contains some environment variables for running in Docker. Copy it, name it `.env`, and fill it in with your own values. Next time you run `run.sh`, your custom values will be used.

-You can also set these values in `docker compose.yml` directly, but `.env` will help avoid conflicts when code is updated.
+You can also set these values in `docker-compose.yml` directly, but `.env` will help avoid conflicts when code is updated.

-Example (most values are optional):
+Values are optional, but setting `INVOKEAI_ROOT` is highly recommended. The default is `~/invokeai`. Example:

-```
+```bash
 INVOKEAI_ROOT=/Volumes/WorkDrive/invokeai
 HUGGINGFACE_TOKEN=the_actual_token
 CONTAINER_UID=1000
-GPU_DRIVER=cuda
+GPU_DRIVER=nvidia
 ```

+Any environment variables supported by InvokeAI can be set here - please see the [Configuration docs](https://invoke-ai.github.io/InvokeAI/features/CONFIGURATION/) for further detail.
+
 ## Even Moar Customizing!

-See the `docker compose.yaml` file. The `command` instruction can be uncommented and used to run arbitrary startup commands. Some examples below.
+See the `docker-compose.yml` file. The `command` instruction can be uncommented and used to run arbitrary startup commands. Some examples below.

 ### Reconfigure the runtime directory

@@ -63,7 +74,7 @@ Can be used to download additional models from the supported model list

 In conjunction with `INVOKEAI_ROOT` can be also used to initialize a runtime directory

-```
+```yaml
 command:
   - invokeai-configure
   - --yes

@@ -71,7 +82,7 @@ command:

 Or install models:

-```
+```yaml
 command:
   - invokeai-model-install
 ```
(deleted file, 11 lines)

@@ -1,11 +0,0 @@
#!/usr/bin/env bash
set -e

build_args=""

[[ -f ".env" ]] && build_args=$(awk '$1 ~ /\=[^$]/ {print "--build-arg " $0 " "}' .env)

echo "docker-compose build args:"
echo $build_args

docker-compose build $build_args
docker/docker-compose.yml

@@ -2,19 +2,8 @@
 version: '3.8'

-services:
-  invokeai:
+x-invokeai: &invokeai
     image: "local/invokeai:latest"
-    # edit below to run on a container runtime other than nvidia-container-runtime.
-    # not yet tested with rocm/AMD GPUs
-    # Comment out the "deploy" section to run on CPU only
-    deploy:
-      resources:
-        reservations:
-          devices:
-            - driver: nvidia
-              count: 1
-              capabilities: [gpu]
     build:
       context: ..
       dockerfile: docker/Dockerfile

@@ -32,7 +21,9 @@ services:
     ports:
       - "${INVOKEAI_PORT:-9090}:9090"
     volumes:
-      - ${INVOKEAI_ROOT:-~/invokeai}:${INVOKEAI_ROOT:-/invokeai}
+      - type: bind
+        source: ${HOST_INVOKEAI_ROOT:-${INVOKEAI_ROOT:-~/invokeai}}
+        target: ${INVOKEAI_ROOT:-/invokeai}
       - ${HF_HOME:-~/.cache/huggingface}:${HF_HOME:-/invokeai/.cache/huggingface}
       # - ${INVOKEAI_MODELS_DIR:-${INVOKEAI_ROOT:-/invokeai/models}}
       # - ${INVOKEAI_MODELS_CONFIG_PATH:-${INVOKEAI_ROOT:-/invokeai/configs/models.yaml}}

@@ -46,3 +37,27 @@ services:
       # - |
       #   invokeai-model-install --yes --default-only --config_file ${INVOKEAI_ROOT}/config_custom.yaml
       #   invokeai-nodes-web --host 0.0.0.0

+services:
+  invokeai-nvidia:
+    <<: *invokeai
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities: [gpu]
+
+  invokeai-cpu:
+    <<: *invokeai
+    profiles:
+      - cpu
+
+  invokeai-rocm:
+    <<: *invokeai
+    devices:
+      - /dev/kfd:/dev/kfd
+      - /dev/dri:/dev/dri
+    profiles:
+      - rocm
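The `x-invokeai: &invokeai` anchor holds the shared service definition; each service merges it with `<<: *invokeai` and adds only its device wiring, and Compose profiles pick which variant starts. `run.sh` (shown further below) derives the profile from `GPU_DRIVER` automatically, but done by hand the selection would look roughly like:

```bash
docker compose up -d invokeai-nvidia               # NVIDIA; no profile flag needed
docker compose --profile cpu up -d invokeai-cpu    # CPU-only
docker compose --profile rocm up -d invokeai-rocm  # AMD GPU via ROCm
```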
docker/docker-entrypoint.sh

@@ -19,7 +19,7 @@ set -e -o pipefail
 # Default UID: 1000 chosen due to popularity on Linux systems. Possibly 501 on MacOS.

 USER_ID=${CONTAINER_UID:-1000}
-USER=invoke
+USER=ubuntu
 usermod -u ${USER_ID} ${USER} 1>/dev/null

 configure() {
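Because the entrypoint remaps the container user to `CONTAINER_UID`, files written into the bind-mounted runtime directory keep a sensible owner on the host. One plausible way to line this up with your own account, assuming Compose forwards `CONTAINER_UID` from `.env` into the container environment as `.env.sample` suggests:

```bash
echo "CONTAINER_UID=$(id -u)" >> .env   # match the container user to your host UID
./run.sh
```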
docker/run.sh

@@ -1,8 +1,32 @@
 #!/usr/bin/env bash
-set -e
+set -e -o pipefail

-SCRIPTDIR=$(dirname "${BASH_SOURCE[0]}")
-cd "$SCRIPTDIR" || exit 1
+run() {
+  local scriptdir=$(dirname "${BASH_SOURCE[0]}")
+  cd "$scriptdir" || exit 1

-docker-compose up --build -d
-docker-compose logs -f
+  local build_args=""
+  local profile=""
+
+  touch .env
+  build_args=$(awk '$1 ~ /=[^$]/ && $0 !~ /^#/ {print "--build-arg " $0 " "}' .env) &&
+    profile="$(awk -F '=' '/GPU_DRIVER/ {print $2}' .env)"
+
+  [[ -z "$profile" ]] && profile="nvidia"
+
+  local service_name="invokeai-$profile"
+
+  if [[ ! -z "$build_args" ]]; then
+    printf "%s\n" "docker compose build args:"
+    printf "%s\n" "$build_args"
+  fi
+
+  docker compose build $build_args $service_name
+  unset build_args
+
+  printf "%s\n" "starting service $service_name"
+  docker compose --profile "$profile" up -d "$service_name"
+  docker compose logs -f
+}
+
+run
@@ -488,7 +488,7 @@ sections describe what's new for InvokeAI.

 - A choice of installer scripts that automate installation and configuration.
   See
-  [Installation](installation/index.md).
+  [Installation](installation/INSTALLATION.md).
 - A streamlined manual installation process that works for both Conda and
   PIP-only installs. See
   [Manual Installation](installation/020_INSTALL_MANUAL.md).

@@ -657,7 +657,7 @@ sections describe what's new for InvokeAI.

 ## v1.13 <small>(3 September 2022)</small>

-- Support image variations (see [VARIATIONS](features/VARIATIONS.md)
+- Support image variations (see [VARIATIONS](deprecated/VARIATIONS.md)
   ([Kevin Gibbons](https://github.com/bakkot) and many contributors and
   reviewers)
 - Supports a Google Colab notebook for a standalone server running on Google
|
Before Width: | Height: | Size: 297 KiB After Width: | Height: | Size: 46 KiB |
Before Width: | Height: | Size: 1.1 MiB After Width: | Height: | Size: 4.9 MiB |
Before Width: | Height: | Size: 169 KiB After Width: | Height: | Size: 1.1 MiB |
Before Width: | Height: | Size: 194 KiB After Width: | Height: | Size: 131 KiB |
Before Width: | Height: | Size: 209 KiB After Width: | Height: | Size: 122 KiB |
Before Width: | Height: | Size: 114 KiB After Width: | Height: | Size: 95 KiB |
Before Width: | Height: | Size: 187 KiB After Width: | Height: | Size: 123 KiB |
Before Width: | Height: | Size: 112 KiB After Width: | Height: | Size: 107 KiB |
Before Width: | Height: | Size: 132 KiB After Width: | Height: | Size: 61 KiB |
Before Width: | Height: | Size: 167 KiB After Width: | Height: | Size: 119 KiB |
Before Width: | Height: | Size: 70 KiB |
Before Width: | Height: | Size: 59 KiB After Width: | Height: | Size: 60 KiB |
BIN
docs/assets/nodes/workflow_library.png
Normal file
After Width: | Height: | Size: 129 KiB |
docs/contributing/DOWNLOAD_QUEUE.md  (new file, 277 lines)

# The InvokeAI Download Queue

The DownloadQueueService provides a multithreaded parallel download
queue for arbitrary URLs, with queue prioritization, event handling,
and restart capabilities.

## Simple Example

```
from invokeai.app.services.download import DownloadQueueService, TqdmProgress

download_queue = DownloadQueueService()
for url in ['https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/assets/a-painting-of-a-fire.png?raw=true',
            'https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/assets/birdhouse.png?raw=true',
            'https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/assets/missing.png',
            'https://civitai.com/api/download/models/152309?type=Model&format=SafeTensor',
           ]:

    # urls start downloading as soon as download() is called
    download_queue.download(source=url,
                            dest='/tmp/downloads',
                            on_progress=TqdmProgress().update
                           )

download_queue.join()  # wait for all downloads to finish
for job in download_queue.list_jobs():
    print(job.model_dump_json(exclude_none=True, indent=4), "\n")
```

Output:

```
{
    "source": "https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/assets/a-painting-of-a-fire.png?raw=true",
    "dest": "/tmp/downloads",
    "id": 0,
    "priority": 10,
    "status": "completed",
    "download_path": "/tmp/downloads/a-painting-of-a-fire.png",
    "job_started": "2023-12-04T05:34:41.742174",
    "job_ended": "2023-12-04T05:34:42.592035",
    "bytes": 666734,
    "total_bytes": 666734
}

{
    "source": "https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/assets/birdhouse.png?raw=true",
    "dest": "/tmp/downloads",
    "id": 1,
    "priority": 10,
    "status": "completed",
    "download_path": "/tmp/downloads/birdhouse.png",
    "job_started": "2023-12-04T05:34:41.741975",
    "job_ended": "2023-12-04T05:34:42.652841",
    "bytes": 774949,
    "total_bytes": 774949
}

{
    "source": "https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/assets/missing.png",
    "dest": "/tmp/downloads",
    "id": 2,
    "priority": 10,
    "status": "error",
    "job_started": "2023-12-04T05:34:41.742079",
    "job_ended": "2023-12-04T05:34:42.147625",
    "bytes": 0,
    "total_bytes": 0,
    "error_type": "HTTPError(Not Found)",
    "error": "Traceback (most recent call last):\n  File \"/home/lstein/Projects/InvokeAI/invokeai/app/services/download/download_default.py\", line 182, in _download_next_item\n    self._do_download(job)\n  File \"/home/lstein/Projects/InvokeAI/invokeai/app/services/download/download_default.py\", line 206, in _do_download\n    raise HTTPError(resp.reason)\nrequests.exceptions.HTTPError: Not Found\n"
}

{
    "source": "https://civitai.com/api/download/models/152309?type=Model&format=SafeTensor",
    "dest": "/tmp/downloads",
    "id": 3,
    "priority": 10,
    "status": "completed",
    "download_path": "/tmp/downloads/xl_more_art-full_v1.safetensors",
    "job_started": "2023-12-04T05:34:42.147645",
    "job_ended": "2023-12-04T05:34:43.735990",
    "bytes": 719020768,
    "total_bytes": 719020768
}
```
## The API

The default download queue is `DownloadQueueService`, an
implementation of ABC `DownloadQueueServiceBase`. It juggles multiple
background download requests and provides facilities for interrogating
and cancelling the requests. Access to a current or past download task
is mediated via `DownloadJob` objects, which report the current status
of a job request.

### The Queue Object

A default download queue is located in
`ApiDependencies.invoker.services.download_queue`. However, you can
create additional instances if you need to isolate your queue from the
main one.

```
queue = DownloadQueueService(event_bus=events)
```

`DownloadQueueService()` takes three optional arguments:

| **Argument** | **Type** | **Default** | **Description** |
|----------------|-----------------|---------------|-----------------|
| `max_parallel_dl` | int | 5 | Maximum number of simultaneous downloads allowed |
| `event_bus` | EventServiceBase | None | System-wide FastAPI event bus for reporting download events |
| `requests_session` | requests.sessions.Session | None | An alternative requests Session object to use for the download |

`max_parallel_dl` specifies how many download jobs are allowed to run
simultaneously. Each will run in a different thread of execution.

`event_bus` is an EventServiceBase, typically the one created at
InvokeAI startup. If present, download events are periodically emitted
on this bus to allow clients to follow download progress.

`requests_session` is a `requests` library Session object. It is
used for testing.

### The Job object

The queue operates on a series of download job objects. These objects
specify the source and destination of the download, and keep track of
the progress of the download.

The only job type currently implemented is `DownloadJob`, a pydantic object with the
following fields:

| **Field** | **Type** | **Default** | **Description** |
|----------------|-----------------|---------------|-----------------|
| _Fields passed in at job creation time_ | | | |
| `source` | AnyHttpUrl | | Where to download from |
| `dest` | Path | | Where to download to |
| `access_token` | str | | [optional] string containing authentication token for access |
| `on_start` | Callable | | [optional] callback when the download starts |
| `on_progress` | Callable | | [optional] callback called at intervals during download progress |
| `on_complete` | Callable | | [optional] callback called after successful download completion |
| `on_error` | Callable | | [optional] callback called after an error occurs |
| `id` | int | auto assigned | Job ID, an integer >= 0 |
| `priority` | int | 10 | Job priority. Lower priorities run before higher priorities |
| _Fields updated over the course of the download task_ | | | |
| `status` | DownloadJobStatus | | Status code |
| `download_path` | Path | | Path to the location of the downloaded file |
| `job_started` | float | | Timestamp for when the job started running |
| `job_ended` | float | | Timestamp for when the job completed or errored out |
| `job_sequence` | int | | A counter that is incremented each time a model is dequeued |
| `bytes` | int | 0 | Bytes downloaded so far |
| `total_bytes` | int | 0 | Total size of the file at the remote site |
| `error_type` | str | | String version of the exception that caused an error during download |
| `error` | str | | String version of the traceback associated with an error |
| `cancelled` | bool | False | Set to true if the job was cancelled by the caller |

When you create a job, you can assign it a `priority`. If multiple
jobs are queued, the job with the lowest priority runs first.

Every job has a `source` and a `dest`. `source` is a pydantic.networks AnyHttpUrl object.
The `dest` is a path on the local filesystem that specifies the
destination for the downloaded object. Its semantics are
described below.

When the job is submitted, it is assigned a numeric `id`. The id can
then be used to fetch the job object from the queue.

The `status` field is updated by the queue to indicate where the job
is in its lifecycle. Values are defined in the string enum
`DownloadJobStatus`, a symbol available from
`invokeai.app.services.download_manager`. Possible values are:

| **Value** | **String Value** | **Description** |
|--------------|---------------------|-------------------|
| `WAITING` | waiting | Job is on the queue but not yet running |
| `RUNNING` | running | The download is started |
| `COMPLETED` | completed | Job has finished its work without an error |
| `ERROR` | error | Job encountered an error and will not run again |

`job_started` and `job_ended` indicate when the job
was started (using a python timestamp) and when it completed.

In case of an error, the job's status will be set to `DownloadJobStatus.ERROR`, the text of the
Exception that caused the error will be placed in the `error_type`
field and the traceback that led to the error will be in `error`.

A cancelled job will have status `DownloadJobStatus.ERROR` and an
`error_type` field of "DownloadJobCancelledException". In addition,
the job's `cancelled` property will be set to True.

### Callbacks

Download jobs can be associated with a series of callbacks, each with
the signature `Callable[["DownloadJob"], None]`. The callbacks are assigned
using optional arguments `on_start`, `on_progress`, `on_complete` and
`on_error`. When the corresponding event occurs, the callback will be
invoked and passed the job. The callback will be run in a `try:`
context in the same thread as the download job. Any exceptions that
occur during execution of the callback will be caught and converted
into a log error message, thereby allowing the download to continue.
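As a concrete sketch, an `on_error` handler could be wired up as below; the URL is a placeholder, but the arguments are the ones documented above:

```
from invokeai.app.services.download import DownloadQueueService

def announce_error(job):
    # signature Callable[["DownloadJob"], None]; runs in the download thread
    print(f"download of {job.source} failed: {job.error_type}")

queue = DownloadQueueService()
queue.download(source='https://example.com/some_model.safetensors',  # placeholder URL
               dest='/tmp/downloads',
               on_error=announce_error,
               )
queue.join()
```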
#### `TqdmProgress`

The `invokeai.app.services.download.download_default` module defines a
class named `TqdmProgress` which can be used as an `on_progress`
handler to display a completion bar in the console. Use as follows:

```
from invokeai.app.services.download import TqdmProgress

download_queue.download(source='http://some.server.somewhere/some_file',
                        dest='/tmp/downloads',
                        on_progress=TqdmProgress().update
                       )
```

### Events

If the queue was initialized with the InvokeAI event bus (the case
when using `ApiDependencies.invoker.services.download_queue`), then
download events will also be issued on the bus. The events are:

* `download_started` -- This is issued when a job is taken off the
queue and a request is made to the remote server for the URL headers, but before any data
has been downloaded. The event payload will contain the keys `source`
and `download_path`. The latter contains the path that the URL will be
downloaded to.

* `download_progress` -- This is issued periodically as the download
runs. The payload contains the keys `source`, `download_path`,
`current_bytes` and `total_bytes`. The latter two fields can be
used to display the percent complete.

* `download_complete` -- This is issued when the download completes
successfully. The payload contains the keys `source`, `download_path`
and `total_bytes`.

* `download_error` -- This is issued when the download stops because
of an error condition. The payload contains the fields `error_type`
and `error`. The former is the text representation of the exception,
and the latter is a traceback showing where the error occurred.

### Job control

To create a job call the queue's `download()` method. You can list all
jobs using `list_jobs()`, fetch a single job by its id with
`id_to_job()`, cancel a running job with `cancel_job()`, cancel all
running jobs with `cancel_all_jobs()`, and wait for all jobs to finish
with `join()`.

#### job = queue.download(source, dest, priority, access_token)

Create a new download job and put it on the queue, returning the
DownloadJob object.

#### jobs = queue.list_jobs()

Return a list of all active and inactive `DownloadJob`s.

#### job = queue.id_to_job(id)

Return the job corresponding to the given ID.

#### queue.prune_jobs()

Remove inactive (complete or errored) jobs from the listing returned
by `list_jobs()`.

#### queue.join()

Block until all pending jobs have run to completion or errored out.
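Putting those calls together, a short control-flow sketch (the URL is a placeholder, and passing the job object itself to `cancel_job()` is an assumption about its argument):

```
job = queue.download(source='https://example.com/big_model.safetensors',  # placeholder URL
                     dest='/tmp/downloads', priority=5)
print(queue.id_to_job(job.id).status)   # look the job up again by id
queue.cancel_job(job)                   # stop this one download
queue.join()                            # wait for the rest of the queue to drain
queue.prune_jobs()                      # drop finished/errored jobs from list_jobs()
```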
@@ -1,6 +1,6 @@
-# Invocations
+# Nodes

-Features in InvokeAI are added in the form of modular node-like systems called
+Features in InvokeAI are added in the form of modular node systems called
 **Invocations**.

 An Invocation is simply a single operation that takes in some inputs and gives

@@ -9,13 +9,34 @@ complex functionality.

 ## Invocations Directory

-InvokeAI Invocations can be found in the `invokeai/app/invocations` directory.
+InvokeAI Nodes can be found in the `invokeai/app/invocations` directory. These can be used as examples to create your own nodes.

-You can add your new functionality to one of the existing Invocations in this
-directory or create a new file in this directory as per your needs.
+New nodes should be added to a subfolder in the `nodes` directory found at the root level of the InvokeAI installation location. Nodes added to this folder will be available upon application startup.
+
+Example `nodes` subfolder structure:
+
+```py
+├── __init__.py # Invoke-managed custom node loader
+│
+├── cool_node
+│   ├── __init__.py # see example below
+│   └── cool_node.py
+│
+└── my_node_pack
+    ├── __init__.py # see example below
+    ├── tasty_node.py
+    ├── bodacious_node.py
+    ├── utils.py
+    └── extra_nodes
+        └── fancy_node.py
+```
+
+Each node folder must have an `__init__.py` file that imports its nodes. Only nodes imported in the `__init__.py` file are loaded. See the README in the nodes folder for more examples:
+
+```py
+from .cool_node import CoolInvocation
+```

-**Note:** _All Invocations must be inside this directory for InvokeAI to
-recognize them as valid Invocations._
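For a multi-module pack like `my_node_pack` in the tree above, the same rule applies to each module; a sketch of its `__init__.py`, with hypothetical class names that follow the file names:

```py
from .tasty_node import TastyInvocation
from .bodacious_node import BodaciousInvocation
from .extra_nodes.fancy_node import FancyInvocation
```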
 ## Creating A New Invocation

@@ -44,7 +65,7 @@ The first set of things we need to do when creating a new Invocation are -

 So let us do that.

 ```python
-from .baseinvocation import BaseInvocation, invocation
+from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation

 @invocation('resize')
 class ResizeInvocation(BaseInvocation):

@@ -78,8 +99,8 @@ create your own custom field types later in this guide. For now, let's go ahead
 and use it.

 ```python
-from .baseinvocation import BaseInvocation, InputField, invocation
-from .primitives import ImageField
+from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation
+from invokeai.app.invocations.primitives import ImageField

 @invocation('resize')
 class ResizeInvocation(BaseInvocation):

@@ -103,8 +124,8 @@ image: ImageField = InputField(description="The input image")
 Great. Now let us create our other inputs for `width` and `height`

 ```python
-from .baseinvocation import BaseInvocation, InputField, invocation
-from .primitives import ImageField
+from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation
+from invokeai.app.invocations.primitives import ImageField

 @invocation('resize')
 class ResizeInvocation(BaseInvocation):

@@ -139,8 +160,8 @@ that are provided by it by InvokeAI.
 Let us create this function first.

 ```python
-from .baseinvocation import BaseInvocation, InputField, invocation
-from .primitives import ImageField
+from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation, InvocationContext
+from invokeai.app.invocations.primitives import ImageField

 @invocation('resize')
 class ResizeInvocation(BaseInvocation):

@@ -168,9 +189,9 @@ all the necessary info related to image outputs. So let us use that.
 We will cover how to create your own output types later in this guide.

 ```python
-from .baseinvocation import BaseInvocation, InputField, invocation
-from .primitives import ImageField
-from .image import ImageOutput
+from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation, InvocationContext
+from invokeai.app.invocations.primitives import ImageField
+from invokeai.app.invocations.image import ImageOutput

 @invocation('resize')
 class ResizeInvocation(BaseInvocation):

@@ -195,9 +216,9 @@ Perfect. Now that we have our Invocation setup, let us do what we want to do.
 So let's do that.

 ```python
-from .baseinvocation import BaseInvocation, InputField, invocation
-from .primitives import ImageField
-from .image import ImageOutput
+from invokeai.app.invocations.baseinvocation import BaseInvocation, InputField, invocation, InvocationContext
+from invokeai.app.invocations.primitives import ImageField
+from invokeai.app.invocations.image import ImageOutput, ResourceOrigin, ImageCategory

 @invocation("resize")
 class ResizeInvocation(BaseInvocation):
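The diff stops at the class header each time, so for orientation here is a minimal sketch of the finished walkthrough invocation with the new absolute imports; the body of `invoke()` (the exact `context.services` calls and DTO fields) is an assumption pieced together from the fields and imports named in this guide, not verbatim repository code:

```python
from invokeai.app.invocations.baseinvocation import (
    BaseInvocation, InputField, invocation, InvocationContext,
)
from invokeai.app.invocations.primitives import ImageField
from invokeai.app.invocations.image import ImageOutput, ResourceOrigin, ImageCategory


@invocation("resize")
class ResizeInvocation(BaseInvocation):
    """Resizes an image"""

    image: ImageField = InputField(description="The input image")
    width: int = InputField(default=512, description="Width of the new image")
    height: int = InputField(default=512, description="Height of the new image")

    def invoke(self, context: InvocationContext) -> ImageOutput:
        # Load the input image from the image service (service call names
        # are assumptions based on the 3.x services API).
        image = context.services.images.get_pil_image(self.image.image_name)

        # Do the actual work of the node.
        resized = image.resize((self.width, self.height))

        # Save the result and wrap it in an ImageOutput.
        image_dto = context.services.images.create(
            image=resized,
            image_origin=ResourceOrigin.INTERNAL,
            image_category=ImageCategory.GENERAL,
            node_id=self.id,
            session_id=context.graph_execution_state_id,
            is_intermediate=self.is_intermediate,
        )
        return ImageOutput(
            image=ImageField(image_name=image_dto.image_name),
            width=image_dto.width,
            height=image_dto.height,
        )
```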
docs/contributing/MODEL_MANAGER.md  (new file, 1565 lines)

contributingToFrontend.md  (deleted file, 75 lines)

@@ -1,75 +0,0 @@
# Contributing to the Frontend

# InvokeAI Web UI

- [InvokeAI Web UI](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#invokeai-web-ui)
- [Stack](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#stack)
- [Contributing](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#contributing)
- [Dev Environment](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#dev-environment)
- [Production builds](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web/docs#production-builds)

The UI is a fairly straightforward Typescript React app, with the Unified Canvas being more complex.

Code is located in `invokeai/frontend/web/` for review.

## Stack

State management is Redux via [Redux Toolkit](https://github.com/reduxjs/redux-toolkit). We lean heavily on RTK:

- `createAsyncThunk` for HTTP requests
- `createEntityAdapter` for fetching images and models
- `createListenerMiddleware` for workflows

The API client and associated types are generated from the OpenAPI schema. See API_CLIENT.md.

Communication with the server is a mix of HTTP and [socket.io](https://github.com/socketio/socket.io-client) (with a simple socket.io redux middleware to help).

[Chakra-UI](https://github.com/chakra-ui/chakra-ui) & [Mantine](https://github.com/mantinedev/mantine) for components and styling.

[Konva](https://github.com/konvajs/react-konva) for the canvas, but we are pushing the limits of what is feasible with it (and HTML canvas in general). We plan to rebuild it with [PixiJS](https://github.com/pixijs/pixijs) to take advantage of WebGL's improved raster handling.

[Vite](https://vitejs.dev/) for bundling.

Localisation is via [i18next](https://github.com/i18next/react-i18next), but translation happens on our [Weblate](https://hosted.weblate.org/engage/invokeai/) project. Only the English source strings should be changed on this repo.

## Contributing

Thanks for your interest in contributing to the InvokeAI Web UI!

We encourage you to ping @psychedelicious and @blessedcoolant on [Discord](https://discord.gg/ZmtBAhwWhy) if you want to contribute, just to touch base and ensure your work doesn't conflict with anything else going on. The project is very active.

### Dev Environment

**Setup**

1. Install [node](https://nodejs.org/en/download/). You can confirm node is installed with:

   ```bash
   node --version
   ```

2. Install [yarn classic](https://classic.yarnpkg.com/lang/en/) and confirm it is installed by running this:

   ```bash
   npm install --global yarn
   yarn --version
   ```

From `invokeai/frontend/web/` run `yarn install` to get everything set up.

Start everything in dev mode:

1. Ensure your virtual environment is running
2. Start the dev server: `yarn dev`
3. Start the InvokeAI Nodes backend: `python scripts/invokeai-web.py # run from the repo root`
4. Point your browser to the dev server address, e.g. [http://localhost:5173/](http://localhost:5173/)

### VSCode Remote Dev

We've noticed an intermittent issue with the VSCode Remote Dev port forwarding. If you use this feature of VSCode, you may intermittently click the Invoke button and then get nothing until the request times out. We suggest disabling the IDE's port forwarding feature and doing it manually via SSH:

`ssh -L 9090:localhost:9090 -L 5173:localhost:5173 user@host`

### Production builds

For a number of technical and logistical reasons, we need to commit UI build artefacts to the repo.

If you submit a PR, there is a good chance we will ask you to include a separate commit with a build of the app.

To build for production, run `yarn build`.
@@ -12,7 +12,7 @@ To get started, take a look at our [new contributors checklist](newContributorCh

 Once you're set up, for more information, you can review the documentation specific to your area of interest:

 * #### [InvokeAI Architecture](../ARCHITECTURE.md)
-* #### [Frontend Documentation](./contributingToFrontend.md)
+* #### [Frontend Documentation](https://github.com/invoke-ai/InvokeAI/tree/main/invokeai/frontend/web)
 * #### [Node Documentation](../INVOCATIONS.md)
 * #### [Local Development](../LOCAL_DEVELOPMENT.md)

@@ -45,5 +45,5 @@ For backend related work, please reach out to **@blessedcoolant**, **@lstein**,

 ## **What does the Code of Conduct mean for me?**

-Our [Code of Conduct](CODE_OF_CONDUCT.md) means that you are responsible for treating everyone on the project with respect and courtesy regardless of their identity. If you are the victim of any inappropriate behavior or comments as described in our Code of Conduct, we are here for you and will do our best to ensure that the abuser is reprimanded appropriately, per our code.
+Our [Code of Conduct](../../CODE_OF_CONDUCT.md) means that you are responsible for treating everyone on the project with respect and courtesy regardless of their identity. If you are the victim of any inappropriate behavior or comments as described in our Code of Conduct, we are here for you and will do our best to ensure that the abuser is reprimanded appropriately, per our code.
docs/deprecated/2to3.md  (new file, 53 lines)

## :octicons-log-16: Important Changes Since Version 2.3

### Nodes

Behind the scenes, InvokeAI has been completely rewritten to support
"nodes," small unitary operations that can be combined into graphs to
form arbitrary workflows. For example, there is a prompt node that
processes the prompt string and feeds it to a text2latent node that
generates a latent image. The latents are then fed to a latent2image
node that translates the latent image into a PNG.

The WebGUI has a node editor that allows you to graphically design and
execute custom node graphs. The ability to save and load graphs is
still a work in progress, but coming soon.

### Command-Line Interface Retired

All "invokeai" command-line interfaces have been retired as of version
3.4.

To launch the Web GUI from the command-line, use the command
`invokeai-web` rather than the traditional `invokeai --web`.

### ControlNet

This version of InvokeAI features ControlNet, a system that allows you
to achieve exact poses for human and animal figures by providing a
model to follow. Full details are found in [ControlNet](features/CONTROLNET.md).

### New Schedulers

The list of schedulers has been completely revamped and brought up to date:

| **Short Name** | **Scheduler**                   | **Notes**                   |
|----------------|---------------------------------|-----------------------------|
| **ddim**       | DDIMScheduler                   |                             |
| **ddpm**       | DDPMScheduler                   |                             |
| **deis**       | DEISMultistepScheduler          |                             |
| **lms**        | LMSDiscreteScheduler            |                             |
| **pndm**       | PNDMScheduler                   |                             |
| **heun**       | HeunDiscreteScheduler           | original noise schedule     |
| **heun_k**     | HeunDiscreteScheduler           | using karras noise schedule |
| **euler**      | EulerDiscreteScheduler          | original noise schedule     |
| **euler_k**    | EulerDiscreteScheduler          | using karras noise schedule |
| **kdpm_2**     | KDPM2DiscreteScheduler          |                             |
| **kdpm_2_a**   | KDPM2AncestralDiscreteScheduler |                             |
| **dpmpp_2s**   | DPMSolverSinglestepScheduler    |                             |
| **dpmpp_2m**   | DPMSolverMultistepScheduler     | original noise schedule     |
| **dpmpp_2m_k** | DPMSolverMultistepScheduler     | using karras noise schedule |
| **unipc**      | UniPCMultistepScheduler         | CPU only                    |
| **lcm**        | LCMScheduler                    |                             |

Please see [3.0.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v3.0.0) for further details.
@@ -211,8 +211,8 @@ Here are the invoke> commands that apply to txt2img:
 | `--facetool <name>` | `-ft <name>` | `-ft gfpgan` | Select face restoration algorithm to use: gfpgan, codeformer |
 | `--codeformer_fidelity` | `-cf <float>` | `0.75` | Used along with CodeFormer. Takes values between 0 and 1. 0 produces high quality but low accuracy. 1 produces high accuracy but low quality |
 | `--save_original` | `-save_orig` | `False` | When upscaling or fixing faces, this will cause the original image to be saved rather than replaced. |
-| `--variation <float>` | `-v<float>` | `0.0` | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with `-S<seed>` and `-n<int>` to generate a series of riffs on a starting image. See [Variations](../features/VARIATIONS.md). |
+| `--variation <float>` | `-v<float>` | `0.0` | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with `-S<seed>` and `-n<int>` to generate a series of riffs on a starting image. See [Variations](VARIATIONS.md). |
-| `--with_variations <pattern>` | | `None` | Combine two or more variations. See [Variations](../features/VARIATIONS.md) for how to use this. |
+| `--with_variations <pattern>` | | `None` | Combine two or more variations. See [Variations](VARIATIONS.md) for how to use this. |
 | `--save_intermediates <n>` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory |
 | `--h_symmetry_time_pct <float>` | | `None` | Create symmetry along the X axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) |
 | `--v_symmetry_time_pct <float>` | | `None` | Create symmetry along the Y axis at the desired percent complete of the generation process. (Must be between 0.0 and 1.0; set to a very small number like 0.0001 for just after the first step of generation.) |
131
docs/deprecated/VARIATIONS.md
Normal file
@ -0,0 +1,131 @@
|
|||||||
|
---
|
||||||
|
title: Variations
|
||||||
|
---
|
||||||
|
|
||||||
|
# :material-tune-variant: Variations
|
||||||
|
|
||||||
|
## Intro
|
||||||
|
|
||||||
|
InvokeAI's support for variations enables you to do the following:
|
||||||
|
|
||||||
|
1. Generate a series of systematic variations of an image, given a prompt. The
|
||||||
|
amount of variation from one image to the next can be controlled.
|
||||||
|
|
||||||
|
2. Given two or more variations that you like, you can combine them in a
|
||||||
|
weighted fashion.
|
||||||
|
|
||||||
|
!!! Information ""
|
||||||
|
|
||||||
|
This cheat sheet provides a quick guide for how this works in practice, using
|
||||||
|
variations to create the desired image of Xena, Warrior Princess.
|
||||||
|
|
||||||
|
## Step 1 -- Find a base image that you like
|
||||||
|
|
||||||
|
The prompt we will use throughout is:
|
||||||
|
|
||||||
|
`#!bash "lucy lawless as xena, warrior princess, character portrait, high resolution."`
|
||||||
|
|
||||||
|
This will be indicated as `#!bash "prompt"` in the examples below.
|
||||||
|
|
||||||
|
First we let SD create a series of images in the usual way, in this case
|
||||||
|
requesting six iterations.
|
||||||
|
|
||||||
|
<figure markdown>
|
||||||
|

|
||||||
|
<figcaption> Seed 3357757885 looks nice </figcaption>
|
||||||
|
</figure>
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Step 2 - Generating Variations
|
||||||
|
|
||||||
|
Let's try to generate some variations on this image. We select the "*"
|
||||||
|
symbol in the line of icons above the image in order to fix the prompt
|
||||||
|
and seed. Then we open up the "Variations" section of the generation
|
||||||
|
panel and use the slider to set the variation amount to 0.2. The
|
||||||
|
higher this value, the more each generated image will differ from the
|
||||||
|
previous one.
|
||||||
|
|
||||||
|
Now we run the prompt a second time, requesting six iterations. You
|
||||||
|
will see six images that are thematically related to each other. Try
|
||||||
|
increasing and decreasing the variation amount and see what happens.
|
||||||
|
|
||||||
|
### **Variation Sub Seeding**
|
||||||
|
|
||||||
|
Note that the output for each image has a `-V` option giving the "variant
|
||||||
|
subseed" for that image, consisting of a seed followed by the variation amount
|
||||||
|
used to generate it.
|
||||||
|
|
||||||
|
This gives us a series of closely-related variations, including the two shown
|
||||||
|
here.
|
||||||
|
|
||||||
|
<figure markdown>
|
||||||
|

|
||||||
|
<figcaption>subseed 3647897225</figcaption>
|
||||||
|
</figure>
|
||||||
|
|
||||||
|
<figure markdown>
|
||||||
|

|
||||||
|
<figcaption>subseed 1614299449</figcaption>
|
||||||
|
</figure>
|
||||||
|
|
||||||
|
I like the expression on Xena's face in the first one (subseed 3647897225), and
|
||||||
|
the armor on her shoulder in the second one (subseed 1614299449). Can we combine
|
||||||
|
them to get the best of both worlds?
|
||||||
|
|
||||||
|
We combine the two variations using `-V` (`--with_variations`). Again, we must
|
||||||
|
provide the seed for the originally-chosen image in order for this to work.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
invoke> "prompt" -S3357757885 -V3647897225,0.1,1614299449,0.1
|
||||||
|
Outputs:
|
||||||
|
./outputs/Xena/000003.1614299449.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1 -S3357757885
|
||||||
|
```
|
||||||
|
|
||||||
|
Here we are providing equal weights (0.1 and 0.1) for both the subseeds. The
|
||||||
|
resulting image is close, but not exactly what I wanted:
|
||||||
|
|
||||||
|
<figure markdown>
|
||||||
|

|
||||||
|
<figcaption> subseed 1614299449 </figcaption>
|
||||||
|
</figure>
We could either keep experimenting with combination weights, or we can
generate more variations around the almost-but-not-quite image. We do the
latter, using both the `-V` (combining) and `-v` (variation strength) options.
Note that we use `-n6` to generate 6 variations:

```bash
invoke> "prompt" -S3357757885 -V3647897225,0.1,1614299449,0.1 -v0.05 -n6
Outputs:
./outputs/Xena/000004.3279757577.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,3279757577:0.05 -S3357757885
./outputs/Xena/000004.2853129515.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,2853129515:0.05 -S3357757885
./outputs/Xena/000004.3747154981.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,3747154981:0.05 -S3357757885
./outputs/Xena/000004.2664260391.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,2664260391:0.05 -S3357757885
./outputs/Xena/000004.1642517170.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,1642517170:0.05 -S3357757885
./outputs/Xena/000004.2183375608.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,2183375608:0.05 -S3357757885
```

This produces six images, all slight variations on the combination of the two
chosen images. Here's the one I like best:

<figure markdown>

<figcaption> subseed 3747154981 </figcaption>
</figure>

As you can see, this is a very powerful tool which, when combined with subprompt
weighting, gives you great control over the content and quality of your
generated images.

## Variations and Samplers

The sampler you choose has a strong effect on variation strength. Some
samplers, such as `k_euler_a`, are very "creative" and produce significant
amounts of image-to-image variation even when the seed is fixed and the
`-v` argument is very low. Others are more deterministic. Feel free to
experiment until you find the combination that you like.
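To compare samplers, you could hold everything else fixed and swap only the
sampler flag (`-A`, as it appears in the output logs above); the values here
are illustrative:

```bash
invoke> "prompt" -S3357757885 -Ak_euler_a -v0.1 -n6
```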
Also be aware of the [Perlin Noise](../features/OTHER.md#thresholding-and-perlin-noise-initialization-options)
feature, which provides another way of introducing variability into your
image generation requests.
@@ -1,88 +0,0 @@
---
title: Textual Inversion Embeddings and LoRAs
---

# :material-library-shelves: Textual Inversions and LoRAs

With the advances in research, many new capabilities are available to customize the knowledge and understanding of novel concepts not originally contained in the base model.

## Using Textual Inversion Files

Textual inversion (TI) files are small models that customize the output of
Stable Diffusion image generation. They can augment SD with specialized subjects
and artistic styles. They are also known as "embeds" in the machine learning
world.

Each TI file introduces one or more vocabulary terms to the SD model. These are
known in InvokeAI as "triggers." Triggers are denoted using angle brackets
as in "<trigger-phrase>". The two most common types of
TI files that you'll encounter are `.pt` and `.bin` files, which are produced by
different TI training packages. InvokeAI supports both formats, but its
[built-in TI training system](TRAINING.md) produces `.pt`.

[Hugging Face](https://huggingface.co/sd-concepts-library) has
amassed a large library of >800 community-contributed TI files covering a
broad range of subjects and styles. You can also install your own or others' TI files
by placing them in the designated directory for the compatible model type.

### An Example

Here are a few examples to illustrate how it works. All these images were
generated using the command-line client and the Stable Diffusion 1.5 model:

| Japanese gardener | Japanese gardener <ghibli-face> | Japanese gardener <hoi4-leaders> | Japanese gardener <cartoona-animals> |
| :--------------------------------: | :-----------------------------------: | :------------------------------------: | :----------------------------------------: |
|  |  |  |  |

You can also combine styles and concepts:

<figure markdown>
| A portrait of <alf> in <cartoona-animal> style |
| :--------------------------------------------------------: |
|  |
</figure>

## Installing your Own TI Files

You may install any number of `.pt` and `.bin` files simply by copying them into
the `embedding` directory of the corresponding InvokeAI models directory (usually `invokeai`
in your home directory). For example, you can simply move a Stable Diffusion 1.5 embedding file to
the `sd-1/embedding` folder. Be careful not to overwrite one file with another.
For example, TI files generated by the Hugging Face toolkit share the name
`learned_embedding.bin`. You can rename these, or use subdirectories to keep them distinct.

At startup time, InvokeAI will scan the various `embedding` directories and load any TI
files it finds there for compatible models. At startup you will see a message similar to this one:

```bash
>> Current embedding manager terms: <HOI4-Leader>, <princess-knight>
```

To use these when generating, simply type the `<` key in your prompt to open the Textual Inversion WebUI and
select the embedding you'd like to use. This UI has type-ahead support, so you can easily find supported embeddings.

## Using LoRAs

LoRA files are models that customize the output of Stable Diffusion
image generation. Larger than embeddings, but much smaller than full
models, they augment SD with improved understanding of subjects and
artistic styles.

Unlike TI files, LoRAs do not introduce novel vocabulary into the
model's known tokens. Instead, LoRAs augment the model's weights that
are applied to generate imagery. LoRAs may be supplied with a
"trigger" word that they have been explicitly trained on, or may
simply apply their effect without being triggered.

LoRAs are typically stored in `.safetensors` files, which are the most
secure way to store and transmit these types of weights. You may
install any number of `.safetensors` LoRA files simply by copying them
into the `autoimport/lora` directory of the corresponding InvokeAI models
directory (usually `invokeai` in your home directory).

To use these when generating, open the LoRA menu item in the options
panel, select the LoRAs you want to apply and ensure that they have
the appropriate weight recommended by the model provider. Typically,
most LoRAs perform best at a weight of 0.75-1.
@@ -82,7 +82,7 @@ format of YAML files can be found
[here](https://circleci.com/blog/what-is-yaml-a-beginner-s-guide/).

You can fix a broken `invokeai.yaml` by deleting it and running the
-configuration script again -- option [7] in the launcher, "Re-run the
+configuration script again -- option [6] in the launcher, "Re-run the
configure script".

#### Reading Environment Variables
@@ -155,13 +155,15 @@ groups in `invokeai.yaml`:
### Web Server

| Setting | Default Value | Description |
|---------------------|---------------|----------------------------------------------------------------------------------------------------------------------------|
| `host` | `localhost` | Name or IP address of the network interface that the web server will listen on |
| `port` | `9090` | Network port number that the web server will listen on |
| `allow_origins` | `[]` | A list of host names or IP addresses that are allowed to connect to the InvokeAI API in the format `['host1','host2',...]` |
| `allow_credentials` | `true` | Require credentials for a foreign host to access the InvokeAI API (don't change this) |
| `allow_methods` | `*` | List of HTTP methods ("GET", "POST") that the web server is allowed to use when accessing the API |
| `allow_headers` | `*` | List of HTTP headers that the web server will accept when accessing the API |
+| `ssl_certfile` | null | Path to an SSL certificate file, used to enable HTTPS. |
+| `ssl_keyfile` | null | Path to an SSL keyfile, if the key is not included in the certificate file. |

The documentation for InvokeAI's API can be accessed by browsing to the following URL: <http://localhost:9090/docs>.
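For a quick sanity check of these settings, you can probe the server from a
shell, using the default host and port from the table above:

```bash
# Confirm the web server is listening and the API docs page responds.
curl -I http://localhost:9090/docs
```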
@@ -17,9 +17,6 @@ image generation, providing you with a way to direct the network
towards generating images that better fit your desired style or
outcome.

-#### How it works
-
ControlNet works by analyzing an input image, pre-processing that
image to identify relevant information that can be interpreted by each
specific ControlNet model, and then inserting that control information
@@ -27,35 +24,21 @@ into the generation process. This can be used to adjust the style,
composition, or other aspects of the image to better achieve a
specific result.

-#### Models
+#### Installation

InvokeAI provides access to a series of ControlNet models that provide
-different effects or styles in your generated images. Currently
-InvokeAI only supports "diffuser" style ControlNet models. These are
-folders that contain the files `config.json` and/or
-`diffusion_pytorch_model.safetensors` and
-`diffusion_pytorch_model.fp16.safetensors`. The name of the folder is
-the name of the model.
+different effects or styles in your generated images.

-***InvokeAI does not currently support checkpoint-format
-ControlNets. These come in the form of a single file with the
-extension `.safetensors`.***
+To install ControlNet Models:

-Diffuser-style ControlNet models are available at HuggingFace
-(http://huggingface.co) and accessed via their repo IDs (identifiers
-in the format "author/modelname"). The easiest way to install them is
+1. The easiest way to install them is
to use the InvokeAI model installer application. Use the
-`invoke.sh`/`invoke.bat` launcher to select item [5] and then navigate
+`invoke.sh`/`invoke.bat` launcher to select item [4] and then navigate
to the CONTROLNETS section. Select the models you wish to install and
press "APPLY CHANGES". You may also enter additional HuggingFace
-repo_ids in the "Additional models" textbox:
+repo_ids in the "Additional models" textbox.
+2. Using the "Add Model" function of the model manager, enter the HuggingFace Repo ID of the ControlNet. The ID is in the format "author/repoName".

-{:width="640px"}
-
-Command-line users can launch the model installer using the command
-`invokeai-model-install`.
-
_Be aware that some ControlNet models require additional code
functionality in order to work properly, so just installing a
@@ -63,6 +46,17 @@ third-party ControlNet model may not have the desired effect._ Please
read and follow the documentation for installing a third party model
not currently included among InvokeAI's default list.

+Currently InvokeAI **only** supports 🤗 Diffusers-format ControlNet models. These are
+folders that contain the files `config.json` and/or
+`diffusion_pytorch_model.safetensors` and
+`diffusion_pytorch_model.fp16.safetensors`. The name of the folder is
+the name of the model.
+
+🤗 Diffusers-format ControlNet models are available at HuggingFace
+(http://huggingface.co) and accessed via their repo IDs (identifiers
+in the format "author/modelname").
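As an aside for command-line users: the console model installer mentioned
elsewhere in these docs can also take ControlNet repo IDs. A sketch, where the
non-interactive flag is an assumption that may vary by version:

```bash
# Launch the console model installer; pick the CONTROLNETS section, or paste
# a repo ID such as the illustrative one below into "Additional models".
invokeai-model-install
# Hypothetical non-interactive form (flag support varies by version):
# invokeai-model-install --add lllyasviel/control_v11p_sd15_canny
```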
+
+#### ControlNet Models
The models currently supported include:

**Canny**:
@@ -100,6 +94,8 @@ A model that helps generate creative QR codes that still scan. Can also be used
**Openpose**:
The OpenPose control model allows for the identification of the general pose of a character by pre-processing an existing image with a clear human structure. With advanced options, Openpose can also detect the face or hands in the image.
+
+*Note:* The DWPose Processor has replaced the OpenPose processor in Invoke. Workflows and generations that relied on the OpenPose Processor will need to be updated to use the DWPose Processor instead.

**Mediapipe Face**:

The MediaPipe Face identification processor is able to clearly identify facial features in order to capture vivid expressions of human faces.
@@ -133,6 +129,29 @@ Start/End - 0 represents the start of the generation, 1 represents the end. The

Additionally, each ControlNet section can be expanded in order to manipulate settings for the image pre-processor that adjusts your uploaded image before using it when you Invoke.
+
+## T2I-Adapter
+
+[T2I-Adapter](https://github.com/TencentARC/T2I-Adapter) is a tool similar to ControlNet that allows for control over the generation process by providing control information during the generation process. T2I-Adapter models tend to be smaller and more efficient than ControlNets.
+
+##### Installation
+
+To install T2I-Adapter Models:
+
+1. The easiest way to install models is
+to use the InvokeAI model installer application. Use the
+`invoke.sh`/`invoke.bat` launcher to select item [5] and then navigate
+to the T2I-Adapters section. Select the models you wish to install and
+press "APPLY CHANGES". You may also enter additional HuggingFace
+repo_ids in the "Additional models" textbox.
+2. Using the "Add Model" function of the model manager, enter the HuggingFace Repo ID of the T2I-Adapter. The ID is in the format "author/repoName".
+
+#### Usage
+
+Each T2I-Adapter has two settings that are applied.
+
+Weight - Strength of the model applied to the generation for the section, defined by start/end.
+
+Start/End - 0 represents the start of the generation, 1 represents the end. The Start/End setting controls what steps during the generation process have the T2I-Adapter applied.
+
+Additionally, each section can be expanded with the "Show Advanced" button in order to manipulate settings for the image pre-processor that adjusts your uploaded image before using it during the generation process.

## IP-Adapter

@@ -140,13 +159,13 @@ Additionally, each ControlNet section can be expanded in order to manipulate set




#### Installation
There are several ways to install IP-Adapter models with an existing InvokeAI installation:

-1. Through the command line interface launched from the invoke.sh / invoke.bat scripts, option [5] to download models.
+1. Through the command line interface launched from the invoke.sh / invoke.bat scripts, option [4] to download models.
-2. Through the Model Manager UI with models from the *Tools* section of [www.models.invoke.ai](www.models.invoke.ai). To do this, copy the repo ID from the desired model page, and paste it in the Add Model field of the model manager. **Note** Both the IP-Adapter and the Image Encoder must be installed for IP-Adapter to work. For example, the [SD 1.5 IP-Adapter](https://models.invoke.ai/InvokeAI/ip_adapter_plus_sd15) and [SD1.5 Image Encoder](https://models.invoke.ai/InvokeAI/ip_adapter_sd_image_encoder) must be installed to use IP-Adapter with SD1.5 based models.
+2. Through the Model Manager UI with models from the *Tools* section of [www.models.invoke.ai](https://www.models.invoke.ai). To do this, copy the repo ID from the desired model page, and paste it in the Add Model field of the model manager. **Note** Both the IP-Adapter and the Image Encoder must be installed for IP-Adapter to work. For example, the [SD 1.5 IP-Adapter](https://models.invoke.ai/InvokeAI/ip_adapter_plus_sd15) and [SD1.5 Image Encoder](https://models.invoke.ai/InvokeAI/ip_adapter_sd_image_encoder) must be installed to use IP-Adapter with SD1.5 based models.
3. **Advanced -- Not recommended** Manually downloading the IP-Adapter and Image Encoder files - Image Encoder folders should be placed in the `models\any\clip_vision` folders. IP-Adapter model folders should be placed in the relevant `ip-adapter` folder of the relevant base model folder of the Invoke root directory. For example, for the SDXL IP-Adapter, files should be added to the `models/sdxl/ip_adapter/` folder.

#### Using IP-Adapter
docs/features/LORAS.md (new file)
@@ -0,0 +1,53 @@
---
title: LoRAs & LCM-LoRAs
---

# :material-library-shelves: LoRAs & LCM-LoRAs

With the advances in research, many new capabilities are available to customize the knowledge and understanding of novel concepts not originally contained in the base model.

## LoRAs

Low-Rank Adaptation (LoRA) files are models that customize the output of Stable Diffusion
image generation. Larger than embeddings, but much smaller than full
models, they augment SD with improved understanding of subjects and
artistic styles.

Unlike TI files, LoRAs do not introduce novel vocabulary into the
model's known tokens. Instead, LoRAs augment the model's weights that
are applied to generate imagery. LoRAs may be supplied with a
"trigger" word that they have been explicitly trained on, or may
simply apply their effect without being triggered.

LoRAs are typically stored in `.safetensors` files, which are the most
secure way to store and transmit these types of weights. You may
install any number of `.safetensors` LoRA files simply by copying them
into the `autoimport/lora` directory of the corresponding InvokeAI models
directory (usually `invokeai` in your home directory).
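For example, a hypothetical install of a downloaded LoRA might look like this
(the file name is illustrative; the paths follow the defaults described above):

```bash
# Copy a downloaded LoRA into the auto-import folder; InvokeAI picks it up at startup.
cp ~/Downloads/pixel-art-style.safetensors ~/invokeai/autoimport/lora/
```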
To use these when generating, open the LoRA menu item in the options
panel, select the LoRAs you want to apply and ensure that they have
the appropriate weight recommended by the model provider. Typically,
most LoRAs perform best at a weight of 0.75-1.

## LCM-LoRAs

Latent Consistency Models (LCMs) allow a reduced number of steps to be used to generate images with Stable Diffusion. These are created by distilling base models, creating models that only require a small number of steps to generate images. However, LCMs require that any fine-tune of a base model be distilled to be used as an LCM.

LCM-LoRAs are models that provide the benefit of LCMs but are able to be used as LoRAs and applied to any fine-tune of a base model. LCM-LoRAs are created by training a small number of adapters, rather than distilling the entire fine-tuned base model. The resulting LoRA can be used the same way as a standard LoRA, but with a greatly reduced step count. This enables SDXL images to be generated up to 10x faster than without the use of LCM-LoRAs.

**Using LCM-LoRAs**

LCM-LoRAs are natively supported in InvokeAI throughout the application. To get started, install any diffusers-format LCM-LoRA using the model manager and select it in the LoRA field.

There are a number of parameter differences between LCM-LoRA and standard generation:

- When using LCM-LoRAs, the LoRA strength should be lower than for a standard LoRA, with 0.35 recommended as a starting point.
- The LCM scheduler should be used for generation.
- CFG-Scale should be reduced to ~1.
- Steps should be reduced, in the range of 4-8.

Standard LoRAs can also be used alongside LCM-LoRAs, but will also require a lower strength, with 0.45 recommended as a starting point.

More information can be found here: https://huggingface.co/blog/lcm_lora#fast-inference-with-sdxl-lcm-loras
@@ -16,9 +16,10 @@ Model Merging can be done by navigating to the Model Manager and clicking the
display all the diffusers-style models that InvokeAI knows about.
If you do not see the model you are looking for, then it is probably
a legacy checkpoint model and needs to be converted using the
-`invoke` command-line client and its `!optimize` command. You
-must select at least two models to merge. The third can be left at
-"None" if you desire.
+"Convert" option in the Web-based Model Manager tab.
+
+You must select at least two models to merge. The third can be left
+at "None" if you desire.

* Alpha: This is the ratio to use when combining models. It ranges
from 0 to 1. The higher the value, the more weight is given to the
@@ -120,7 +120,7 @@ Generate an image with a given prompt, record the seed of the image, and then
use the `prompt2prompt` syntax to substitute words in the original prompt for
words in a new prompt. This works for `img2img` as well.

-For example, consider the prompt `a cat.swap(dog) playing with a ball in the forest`. Normally, because of the word words interact with each other when doing a stable diffusion image generation, these two prompts would generate different compositions:
+For example, consider the prompt `a cat.swap(dog) playing with a ball in the forest`. Normally, because the words interact with each other when doing a stable diffusion image generation, these two prompts would generate different compositions:
- `a cat playing with a ball in the forest`
- `a dog playing with a ball in the forest`
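A concrete invocation might look like this, with an illustrative seed standing
in for the one you recorded from the original image:

```bash
invoke> "a cat.swap(dog) playing with a ball in the forest" -S99999999
```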
docs/features/TEXTUAL_INVERSIONS.md (new file)
@@ -0,0 +1,55 @@
## Using Textual Inversion Files

Textual inversion (TI) files are small models that customize the output of
Stable Diffusion image generation. They can augment SD with specialized subjects
and artistic styles. They are also known as "embeds" in the machine learning
world.

Each TI file introduces one or more vocabulary terms to the SD model. These are
known in InvokeAI as "triggers." Triggers are denoted using angle brackets
as in "<trigger-phrase>". The two most common types of
TI files that you'll encounter are `.pt` and `.bin` files, which are produced by
different TI training packages. InvokeAI supports both formats, but its
[built-in TI training system](TRAINING.md) produces `.pt`.

[Hugging Face](https://huggingface.co/sd-concepts-library) has
amassed a large library of >800 community-contributed TI files covering a
broad range of subjects and styles. You can also install your own or others' TI files
by placing them in the designated directory for the compatible model type.

### An Example

Here are a few examples to illustrate how it works. All these images
were generated using the legacy command-line client and the Stable
Diffusion 1.5 model:

| Japanese gardener | Japanese gardener <ghibli-face> | Japanese gardener <hoi4-leaders> | Japanese gardener <cartoona-animals> |
| :--------------------------------: | :-----------------------------------: | :------------------------------------: | :----------------------------------------: |
|  |  |  |  |

You can also combine styles and concepts:

<figure markdown>
| A portrait of <alf> in <cartoona-animal> style |
| :--------------------------------------------------------: |
|  |
</figure>

## Installing your Own TI Files

You may install any number of `.pt` and `.bin` files simply by copying them into
the `embedding` directory of the corresponding InvokeAI models directory (usually `invokeai`
in your home directory). For example, you can simply move a Stable Diffusion 1.5 embedding file to
the `sd-1/embedding` folder. Be careful not to overwrite one file with another.
For example, TI files generated by the Hugging Face toolkit share the name
`learned_embedding.bin`. You can rename these, or use subdirectories to keep them distinct.
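A hypothetical install of a single SD-1.5 embedding might look like this (the
file name, and the `models` segment of the path, are assumptions based on the
layout described above):

```bash
# Place the TI file where InvokeAI scans for SD-1.5 embeddings at startup.
cp ~/Downloads/ghibli-face.pt ~/invokeai/models/sd-1/embedding/
```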
At startup time, InvokeAI will scan the various `embedding` directories and load any TI
files it finds there for compatible models. At startup you will see a message similar to this one:

```bash
>> Current embedding manager terms: <HOI4-Leader>, <princess-knight>
```

To use these when generating, simply type the `<` key in your prompt to open the Textual Inversion WebUI and
select the embedding you'd like to use. This UI has type-ahead support, so you can easily find supported embeddings.
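Once selected, the trigger sits inline in the prompt text like any other term;
for instance, using one of the triggers from the startup message above (the
prompt itself is purely illustrative):

```bash
"a character portrait of <princess-knight> standing guard at a castle gate"
```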
@@ -229,29 +229,28 @@ clarity on the intent and common use cases we expect for utilizing them.
currently being rendered by your browser into a merged copy of the image. This
lowers the resource requirements and should improve performance.

-### Seam Correction
+### Compositing / Seam Correction

When doing Inpainting or Outpainting, Invoke needs to merge the pixels generated
-by Stable Diffusion into your existing image. To do this, the area around the
-`seam` at the boundary between your image and the new generation is
+by Stable Diffusion into your existing image. This is achieved through compositing: the area around the boundary between your image and the new generation is
automatically blended to produce a seamless output. In a fully automatic
-process, a mask is generated to cover the seam, and then the area of the seam is
+process, a mask is generated to cover the boundary, and then the area of the boundary is
Inpainted.

Although the default options should work well most of the time, sometimes it can
-help to alter the parameters that control the seam Inpainting. A wider seam and
-a blur setting of about 1/3 of the seam have been noted as producing
-consistently strong results (e.g. 96 wide and 16 blur - adds up to 32 blur with
-both sides). Seam strength of 0.7 is best for reducing hard seams.
+help to alter the parameters that control the Compositing. A larger blur and
+blur strength have been noted as producing
+consistently strong results. A strength of 0.7 is best for reducing hard seams.
+
+- **Mode** - What part of the image will have the Compositing applied to it.
+    - **Mask edge** will apply Compositing to the edge of the masked area
+    - **Mask** will apply Compositing to the entire masked area
+    - **Unmasked** will apply Compositing to the entire image
+- **Steps** - Number of generation steps that will occur during the Coherence Pass, similar to Denoising Steps. Higher step counts will generally have better results.
+- **Strength** - How much noise is added for the Coherence Pass, similar to Denoising Strength. A strength of 0 will result in an unchanged image, while a strength of 1 will result in an image with a completely new area as defined by the Mode setting.
+- **Blur** - Adjusts the pixel radius of the mask. A larger blur radius will cause the mask to extend past the visibly masked area, while too small of a blur radius will result in a mask that is smaller than the visibly masked area.
+- **Blur Method** - The method of blur applied to the masked area.

-- **Seam Size** - The size of the seam masked area. Set higher to make a larger
-mask around the seam.
-- **Seam Blur** - The size of the blur that is applied on _each_ side of the
-masked area.
-- **Seam Strength** - The Image To Image Strength parameter used for the
-Inpainting generation that is applied to the seam area.
-- **Seam Steps** - The number of generation steps that should be used to Inpaint
-the seam.

### Infill & Scaling
@@ -8,7 +8,7 @@ title: Command-line Utilities

InvokeAI comes with several scripts that are accessible via the
command line. To access these commands, start the "developer's
-console" from the launcher (`invoke.bat` menu item [8]). Users who are
+console" from the launcher (`invoke.bat` menu item [7]). Users who are
familiar with Python can alternatively activate InvokeAI's virtual
environment (typically, but not necessarily `invokeai/.venv`).
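Activating that environment by hand might look like this (the path is the
typical default, not guaranteed):

```bash
# From the directory containing your InvokeAI root:
source invokeai/.venv/bin/activate    # Linux/macOS
# invokeai\.venv\Scripts\activate     # Windows
```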
@@ -34,7 +34,7 @@ invokeai-web --ram 7

## **invokeai-merge**

-This is the model merge script, the same as launcher option [4]. Call
+This is the model merge script, the same as launcher option [3]. Call
it with the `--gui` command-line argument to start the interactive
console-based GUI. Alternatively, you can run it non-interactively
using command-line arguments as illustrated in the example below which
@@ -48,7 +48,7 @@ invokeai-merge --force --base-model sd-1 --models stable-diffusion-1.5 inkdiffus
## **invokeai-ti**

This is the textual inversion training script that is run by launcher
-option [3]. Call it with `--gui` to run the interactive console-based
+option [2]. Call it with `--gui` to run the interactive console-based
front end. It can also be run non-interactively. It has about a
zillion arguments, but a typical training session can be launched
with:
@@ -68,7 +68,7 @@ in Windows).
## **invokeai-install**

This is the console-based model install script that is run by launcher
-option [5]. If called without arguments, it will launch the
+option [4]. If called without arguments, it will launch the
interactive console-based interface. It can also be used
non-interactively to list, add and remove models as shown by these
examples:
@@ -148,7 +148,7 @@ launch the web server against it with `invokeai-web --root InvokeAI-New`.
## **invokeai-update**

This is the interactive console-based script that is run by launcher
-menu item [9] to update to a new version of InvokeAI. It takes no
+menu item [8] to update to a new version of InvokeAI. It takes no
command-line arguments.

## **invokeai-metadata**
@@ -1,131 +0,0 @@
---
title: Variations
---

# :material-tune-variant: Variations

## Intro

InvokeAI's support for variations enables you to do the following:

1. Generate a series of systematic variations of an image, given a prompt. The
   amount of variation from one image to the next can be controlled.

2. Given two or more variations that you like, you can combine them in a
   weighted fashion.

!!! Information ""
    This cheat sheet provides a quick guide for how this works in practice, using
    variations to create the desired image of Xena, Warrior Princess.
@@ -20,7 +20,7 @@ a single convenient digital artist-optimized user interface.
### * [Prompt Engineering](PROMPTS.md)
Get the images you want with the InvokeAI prompt engineering language.

-### * The [LoRA, LyCORIS and Textual Inversion Models](CONCEPTS.md)
+### * The [LoRA, LyCORIS, LCM-LoRA Models](CONCEPTS.md)
Add custom subjects and styles using a variety of fine-tuned models.

### * [ControlNet](CONTROLNET.md)
@@ -28,7 +28,7 @@ Learn how to install and use ControlNet models for fine control over
image output.

### * [Image-to-Image Guide](IMG2IMG.md)
-Use a seed image to build new creations in the CLI.
+Use a seed image to build new creations.

## Model Management

@@ -40,7 +40,7 @@ guide also covers optimizing models to load quickly.
Teach an old model new tricks. Merge 2-3 models together to create a
new model that combines characteristics of the originals.

-### * [Textual Inversion](TRAINING.md)
+### * [Textual Inversion](TEXTUAL_INVERSIONS.md)
Personalize models by adding your own style or subjects.

## Other Features
docs/help/FAQ.md (new file)
@@ -0,0 +1,43 @@
# FAQs

**Where do I get started? How can I install Invoke?**

- You can download the latest installers [here](https://github.com/invoke-ai/InvokeAI/releases) - Note that any releases marked as *pre-release* are in a beta state. You may experience some issues, but we appreciate your help testing those! For stable/reliable installations, please install the **[Latest Release](https://github.com/invoke-ai/InvokeAI/releases/latest)**.

**How can I download models? Can I use models I already have downloaded?**

- Models can be downloaded through the model manager, or through option [4] in the invoke.bat/invoke.sh launcher script. To download a model through the Model Manager, use the HuggingFace Repo ID by pressing the “Copy” button next to the repository name. Alternatively, to download a model from CivitAI, use the download link in the Model Manager.
- Models that are already downloaded can be used by creating a symlink to the model location in the `autoimport` folder or by using the Model Manager’s “Scan for Models” function.
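A hypothetical symlink setup for the first approach (the source path and the
`main` subfolder are illustrative assumptions):

```bash
# Expose an already-downloaded model to InvokeAI's auto-import scan.
ln -s /data/sd-models/my-model.safetensors ~/invokeai/autoimport/main/
```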
**My images are taking a long time to generate. How can I speed up generation?**

- A common solution is to reduce the size of your RAM & VRAM cache to 0.25. This ensures your system has enough memory to generate images.
- Additionally, check the [hardware requirements](https://invoke-ai.github.io/InvokeAI/#hardware-requirements) to ensure that your system is capable of generating images.
- Lastly, double-check that your generations are happening on your GPU (if you have one). InvokeAI will log what is being used for generation upon startup.
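For example, the cache can be capped at launch time with the `--ram` flag that
appears in the Utilities examples (the 0.25 value mirrors the recommendation
above):

```bash
invokeai-web --ram 0.25
```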
**I’ve installed Python on Windows but the installer says it can’t find it?**

- Ensure that you checked **'Add python.exe to PATH'** when installing Python. This can be found at the bottom of the Python Installer window. If you already have Python installed, this can be done with the modify / repair feature of the installer.

**I’ve installed everything successfully but I still get an error about Triton when starting Invoke?**

- This can be safely ignored. InvokeAI doesn't use Triton, but if you are on Linux and wish to dismiss the error, you can install Triton.

**I updated to 3.4.0 and now xFormers can’t load C++/CUDA?**

- An issue occurred with your PyTorch update. Follow these steps to fix:
    1. Launch your invoke.bat / invoke.sh and select the option to open the developer console
    2. Run: `pip install ".[xformers]" --upgrade --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu121`
- If you run into an error with `typing_extensions`, re-open the developer console and run: `pip install -U typing-extensions`

**It says my pip is out of date - is that why my install isn't working?**

- An out-of-date pip won't cause an installation to fail. The cause of the error can likely be found above the message that says pip is out of date.
- If you saw that warning but the install went well, don't worry about it (but you can update pip afterwards if you'd like).

**How can I generate the exact same image that I found on the internet?**

- Most example images with prompts that you'll find on the internet have been generated using different software, so you can't expect to get identical results. In order to reproduce an image, you need to replicate the exact settings and processing steps, including (but not limited to) the model, the positive and negative prompts, the seed, the sampler, the exact image size, any upscaling steps, etc.

**Where can I get more help?**

- Create an issue on [GitHub](https://github.com/invoke-ai/InvokeAI/issues) or post in the [#help channel](https://discord.com/channels/1020123559063990373/1149510134058471514) of the InvokeAI Discord
@@ -57,7 +57,9 @@ Prompts provide the models directions on what to generate. As a general rule of

Models are the magic that power InvokeAI. These files represent the output of training a machine on understanding massive amounts of images - providing them with the capability to generate new images using just a text description of what you’d like to see. (Like Stable Diffusion!)

-Invoke offers a simple way to download several different models upon installation, but many more can be discovered online, including at ****. Each model can produce a unique style of output, based on the images it was trained on - Try out different models to see which best fits your creative vision!
+Invoke offers a simple way to download several different models upon installation, but many more can be discovered online, including at https://models.invoke.ai
+
+Each model can produce a unique style of output, based on the images it was trained on - Try out different models to see which best fits your creative vision!

- *Models that contain “inpainting” in the name are designed for use with the inpainting feature of the Unified Canvas*
docs/img/favicon.ico (new binary file, 4.2 KiB)
@@ -18,7 +18,7 @@ title: Home
  width: 100%;
  max-width: 100%;
  height: 50px;
-  background-color: #448AFF;
+  background-color: #35A4DB;
  color: #fff;
  font-size: 16px;
  border: none;
@@ -43,7 +43,7 @@ title: Home
<div align="center" markdown>

-
+[](https://github.com/invoke-ai/InvokeAI)

[![discord badge]][discord link]
@@ -101,16 +101,13 @@ Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.
<div align="center"><img src="assets/invoke-web-server-1.png" width=640></div>

-!!! Note
-
-    This project is rapidly evolving. Please use the [Issues tab](https://github.com/invoke-ai/InvokeAI/issues) to report bugs and make feature requests. Be sure to use the provided templates as it will help aid response time.

## :octicons-link-24: Quick Links

<div class="button-container">
<a href="installation/INSTALLATION"> <button class="button">Installation</button> </a>
<a href="features/"> <button class="button">Features</button> </a>
<a href="help/gettingStartedWithAI/"> <button class="button">Getting Started</button> </a>
+<a href="help/FAQ/"> <button class="button">FAQ</button> </a>
<a href="contributing/CONTRIBUTING/"> <button class="button">Contributing</button> </a>
<a href="https://github.com/invoke-ai/InvokeAI/"> <button class="button">Code and Downloads</button> </a>
<a href="https://github.com/invoke-ai/InvokeAI/issues"> <button class="button">Bug Reports </button> </a>
@@ -120,6 +117,11 @@ Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.

## :octicons-gift-24: InvokeAI Features

+### Installation
+- [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
+- [Manual Installation](installation/020_INSTALL_MANUAL.md)
+- [Docker Installation](installation/040_INSTALL_DOCKER.md)
+
### The InvokeAI Web Interface
- [WebUI overview](features/WEB.md)
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
@ -143,67 +145,11 @@ Mac and Linux machines, and runs on GPU cards with as little as 4 GB of RAM.
|
|||||||
<!-- seperator -->
|
<!-- seperator -->
|
||||||
### Prompt Engineering
|
### Prompt Engineering
|
||||||
- [Prompt Syntax](features/PROMPTS.md)
|
- [Prompt Syntax](features/PROMPTS.md)
|
||||||
- [Generating Variations](features/VARIATIONS.md)
|
|
||||||
|
|
||||||
### InvokeAI Configuration
|
### InvokeAI Configuration
|
||||||
- [Guide to InvokeAI Runtime Settings](features/CONFIGURATION.md)
|
- [Guide to InvokeAI Runtime Settings](features/CONFIGURATION.md)
|
||||||
- [Database Maintenance and other Command Line Utilities](features/UTILITIES.md)
|
- [Database Maintenance and other Command Line Utilities](features/UTILITIES.md)
|
||||||
|
|
||||||

## :octicons-log-16: Important Changes Since Version 2.3

### Nodes

Behind the scenes, InvokeAI has been completely rewritten to support
"nodes," small unitary operations that can be combined into graphs to
form arbitrary workflows. For example, there is a prompt node that
processes the prompt string and feeds it to a text2latent node that
generates a latent image. The latents are then fed to a latent2image
node that translates the latent image into a PNG.

The WebGUI has a node editor that allows you to graphically design and
execute custom node graphs. The ability to save and load graphs is
still a work in progress, but coming soon.

### Command-Line Interface Retired

The original "invokeai" command-line interface has been retired. The
`invokeai` command will now launch a new command-line client that can
be used by developers to create and test nodes. It is not intended to
be used for routine image generation or manipulation.

To launch the Web GUI from the command line, use the command
`invokeai-web` rather than the traditional `invokeai --web`.

### ControlNet

This version of InvokeAI features ControlNet, a system that allows you
to achieve exact poses for human and animal figures by providing a
model to follow. Full details are found in [ControlNet](features/CONTROLNET.md).

### New Schedulers

The list of schedulers has been completely revamped and brought up to date:

| **Short Name** | **Scheduler**                   | **Notes**                   |
|----------------|---------------------------------|-----------------------------|
| **ddim**       | DDIMScheduler                   |                             |
| **ddpm**       | DDPMScheduler                   |                             |
| **deis**       | DEISMultistepScheduler          |                             |
| **lms**        | LMSDiscreteScheduler            |                             |
| **pndm**       | PNDMScheduler                   |                             |
| **heun**       | HeunDiscreteScheduler           | original noise schedule     |
| **heun_k**     | HeunDiscreteScheduler           | using karras noise schedule |
| **euler**      | EulerDiscreteScheduler          | original noise schedule     |
| **euler_k**    | EulerDiscreteScheduler          | using karras noise schedule |
| **kdpm_2**     | KDPM2DiscreteScheduler          |                             |
| **kdpm_2_a**   | KDPM2AncestralDiscreteScheduler |                             |
| **dpmpp_2s**   | DPMSolverSinglestepScheduler    |                             |
| **dpmpp_2m**   | DPMSolverMultistepScheduler     | original noise schedule     |
| **dpmpp_2m_k** | DPMSolverMultistepScheduler     | using karras noise schedule |
| **unipc**      | UniPCMultistepScheduler         | CPU only                    |

Please see the [3.0.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v3.0.0) for further details.
## :material-target: Troubleshooting

Please check out our **[:material-frequently-asked-questions:
this, open up a command-line window ("Terminal" on Linux and
Macintosh, "Command" or "Powershell" on Windows) and type `python
--version`. If Python is installed, it will print out the version
number. If it is version `3.10.*` or `3.11.*` you meet
requirements.

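For example, the check looks like this (a minimal sketch; the version shown in the comment is illustrative):

```bash
# Print the interpreter version; any 3.10.* or 3.11.* release meets the requirement
python --version
# Python 3.10.12
```
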
!!! warning "What to do if you have an unsupported version"
    Go to [Python Downloads](https://www.python.org/downloads/)
    and download the appropriate installer package for your
    platform. We recommend [Version
    3.10.12](https://www.python.org/downloads/release/python-31012/),
    which has been extensively tested with InvokeAI.

_Please select your platform in the section below for platform-specific
you will have the choice of CUDA (NVidia cards), ROCm (AMD cards),
or CPU (no graphics acceleration). On Windows, you'll have the
choice of CUDA vs CPU, and on Macs you'll be offered CPU only. When
you select CPU on M1/M2/M3 Macintoshes, you will get MPS-based
graphics acceleration without installing additional drivers. If you
are unsure what GPU you are using, you can ask the installer to
guess.
Then type the following commands:

=== "NVIDIA System"
|
=== "NVIDIA System"
|
||||||
```bash
|
```bash
|
||||||
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu118
|
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu121
|
||||||
pip install xformers
|
pip install xformers
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "AMD System"
|
=== "AMD System"
|
||||||
```bash
|
```bash
|
||||||
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.6
|
||||||
```
|
```
|
||||||
|
|
||||||
### Corrupted configuration file
|
* **Python**

    version 3.10 through 3.11

* **CUDA Tools**
To install InvokeAI with virtual environments and the PIP package
manager, please follow these steps:

1. Please make sure you are using Python 3.10 through 3.11. The rest of the install
   procedure depends on this and will not work with other versions:

    ```bash
    # should print 3.10.* or 3.11.*
    python --version
    ```
=== "CUDA (NVidia)"
|
=== "CUDA (NVidia)"
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu118
|
pip install "InvokeAI[xformers]" --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "ROCm (AMD)"
|
=== "ROCm (AMD)"
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "CPU (Intel Macs & non-GPU systems)"
|
=== "CPU (Intel Macs & non-GPU systems)"
|
||||||
=== "local Webserver"
|
=== "local Webserver"
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
invokeai --web
|
invokeai-web
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "Public Webserver"
|
=== "Public Webserver"
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
invokeai --web --host 0.0.0.0
|
invokeai-web --host 0.0.0.0
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "CLI"
|
=== "CLI"
|
||||||
## Developer Install

!!! warning

    InvokeAI uses a SQLite database. By running on `main`, you accept responsibility for your database. This
    means making regular backups (especially before pulling) and/or fixing it yourself in the event that a
    PR introduces a schema change.

    If you don't need persistent backend storage, you can use an ephemeral in-memory database by setting
    `use_memory_db: true` under `Path:` in your `invokeai.yaml` file.

    If this is untenable, you should run the application via the official installer or a manual install of the
    Python package from PyPI. These releases will not break your database.

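For example, a minimal backup habit before pulling might look like this (a sketch; the database path assumes the default root layout at `~/invokeai`):

```bash
# back up the SQLite database before updating the source tree
# (the path below is an assumption based on the default root layout)
cp ~/invokeai/databases/invokeai.db ~/invokeai/databases/invokeai.db.bak
git pull
```
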
If you have an interest in how InvokeAI works, or you would like to
add features or bugfixes, you are encouraged to install the source
code for InvokeAI. For this to work, you will need to install the
on your system, please see the [Git Installation
Guide](https://github.com/git-guides/install-git).

You will also need to install the [frontend development toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/README.md).

If you have a "normal" installation, you should create a totally separate virtual environment for the git-based installation, else the two may interfere.

=== "CUDA (NVidia)"
|
=== "CUDA (NVidia)"
|
||||||
```bash
|
```bash
|
||||||
pip install -e .[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu118
|
pip install -e .[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "ROCm (AMD)"
|
=== "ROCm (AMD)"
|
||||||
```bash
|
```bash
|
||||||
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.4.2
|
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.6
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "CPU (Intel Macs & non-GPU systems)"
|
=== "CPU (Intel Macs & non-GPU systems)"
|
||||||
Be sure to pass `-e` (for an editable install) and don't forget the
dot ("."). It is part of the command.

5. Install the [frontend toolchain](https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/frontend/web/README.md) and do a production build of the UI as described.

6. You can now run `invokeai` and its related commands. The code will be
   read from the repository, so that you can edit the .py source files
you can do so using this unsupported recipe:

```bash
mkdir ~/invokeai
conda create -n invokeai python=3.10
conda activate invokeai
pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu121
invokeai-configure --root ~/invokeai
invokeai --root ~/invokeai --web
```
Note that if you run into problems with the Conda installation, the InvokeAI
staff will **not** be able to help you out. Caveat Emptor!

[dev-chat]: https://discord.com/channels/1020123559063990373/1049495067846524939
When installing torch and torchvision manually with `pip`, remember to provide
the argument `--extra-index-url
https://download.pytorch.org/whl/cu121` as described in the [Manual
Installation Guide](020_INSTALL_MANUAL.md).
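
For example (a sketch mirroring the Manual Installation Guide):

```bash
# install torch/torchvision wheels built against CUDA 12.1
pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu121
```
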
## :simple-amd: ROCm
When installing torch and torchvision manually with `pip`, remember to provide
the argument `--extra-index-url
https://download.pytorch.org/whl/rocm5.6` as described in the [Manual
Installation Guide](020_INSTALL_MANUAL.md).

This will be done automatically for you if you use the installer
# :fontawesome-brands-docker: Docker

!!! warning "macOS and AMD GPU Users"

    We highly recommend installing InvokeAI locally using [these instructions](INSTALLATION.md),
    because Docker containers cannot access the GPU on macOS.

!!! warning "AMD GPU Users"

    Container support for AMD GPUs has been reported to work by the community, but has not received
    extensive testing. Please make sure to set the `GPU_DRIVER=rocm` environment variable (see below), and
    use the `build.sh` script to build the image for this to take effect at build time.

!!! tip "Linux and Windows Users"

    For optimal performance, configure your Docker daemon to access your machine's GPU.
    Docker Desktop on Windows [includes GPU support](https://www.docker.com/blog/wsl-2-gpu-support-for-docker-desktop-on-nvidia-gpus/).
    Linux users should install and configure the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html).

## Why containers?

They provide a flexible, reliable way to build and deploy InvokeAI.
See [Processes](https://12factor.net/processes) under the Twelve-Factor App
methodology for details on why running applications in such a stateless fashion is important.

The container is configured for CUDA by default, but can be built to support AMD GPUs
by setting the `GPU_DRIVER=rocm` environment variable at Docker image build time.

Developers on Apple silicon (M1/M2/M3): You
[can't access your GPU cores from Docker containers](https://github.com/pytorch/pytorch/issues/81224)
and performance is reduced compared with running it directly on macOS, but for
development purposes it's fine. Once you're done with development tasks on your
laptop you can build for the target platform and architecture and deploy to
another environment with NVIDIA GPUs on-premises or in the cloud.

## TL;DR

This assumes properly configured Docker on Linux or Windows/WSL2. Read on for detailed customization options.

```bash
# docker compose commands should be run from the `docker` directory
cd docker
docker compose up
```

## Installation in a Linux container (desktop)

### Prerequisites
### Setup

Set up your environment variables. In the `docker` directory, make a copy of `.env.sample` and name it `.env`. Make changes as necessary.

Any environment variables supported by InvokeAI can be set here - please see [CONFIGURATION](../features/CONFIGURATION.md) for further detail.

At a minimum, you might want to set the `INVOKEAI_ROOT` environment variable
to point to the location where you wish to store your InvokeAI models, configuration, and outputs.

<figure markdown>

| Environment-Variable <img width="220" align="right"/> | Default value <img width="360" align="right"/> | Description |
| ----------------------------------------------------- | ---------------------------------------------- | ----------- |
| `INVOKEAI_ROOT` | `~/invokeai` | **Required** - the location of your InvokeAI root directory. It will be created if it does not exist. |
| `HUGGING_FACE_HUB_TOKEN` | | InvokeAI will work without it, but some of the integrations with HuggingFace (like downloading models from private repositories) may not work. |
| `GPU_DRIVER` | `cuda` | Optionally change this to `rocm` to build the image for AMD GPUs. NOTE: Use the `build.sh` script to build the image for this to take effect. |

</figure>
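
For example, a minimal `.env` setup might look like this (a sketch; the root path is illustrative):

```bash
cd docker
cp .env.sample .env
# then edit .env and set, at minimum:
# INVOKEAI_ROOT=~/invokeai
```
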
#### Build the Image

Use the standard `docker compose build` command from within the `docker` directory.

If using an AMD GPU:

- set the `GPU_DRIVER=rocm` environment variable in `docker-compose.yml` and continue using `docker compose build` as usual, or
- set `GPU_DRIVER=rocm` in the `.env` file and use the `build.sh` script, provided for convenience; see the sketch below.

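A minimal sketch of the second option, run from the `docker` directory:

```bash
# build an AMD (ROCm) image via the convenience script
echo 'GPU_DRIVER=rocm' >> .env
./build.sh
```
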
#### Run the Container

Use the standard `docker compose up` command, and generally the `docker compose` [CLI](https://docs.docker.com/compose/reference/) as usual.

Once the container starts up (and configures the InvokeAI root directory if this is a new installation), you can access InvokeAI at [http://localhost:9090](http://localhost:9090).

## Troubleshooting / FAQ

- Q: I am running on Windows under WSL2, and am seeing a "no such file or directory" error.
- A: Your `docker-entrypoint.sh` file likely has Windows (CRLF) as opposed to Unix (LF) line endings,
  and you may have cloned this repository before the issue was fixed. To solve this, please change
  the line endings in the `docker-entrypoint.sh` file to `LF`. You can do this in VSCode
  (`Ctrl+P` and search for "line endings"), or by using the `dos2unix` utility in WSL, as sketched below.
  Finally, you may delete `docker-entrypoint.sh` followed by `git pull; git checkout docker/docker-entrypoint.sh`
  to reset the file to its most recent version.
  For more information on this issue, please see the [Docker Desktop documentation](https://docs.docker.com/desktop/troubleshoot/topics/#avoid-unexpected-syntax-errors-use-unix-style-line-endings-for-files-in-containers).
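
A sketch of the `dos2unix` route, run from the repository root inside WSL:

```bash
# convert the entrypoint script to LF line endings
dos2unix docker/docker-entrypoint.sh
# or restore the repository version of the file instead
git checkout -- docker/docker-entrypoint.sh
```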
### Installation via `invokeai-model-install`

From the `invoke` launcher, choose option [4] "Download and install
models." This will launch the same script that prompted you to select
models at install time. You can use this to add models that you
skipped the first time around. It is all right to specify a model that

Prior to installing PyPatchMatch, you need to take the following steps:

`from patchmatch import patch_match`: It should look like the following:

```py
Python 3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> from patchmatch import patch_match
Compiling and loading c extensions from "/home/lstein/Projects/InvokeAI/.invokeai-env/src/pypatchmatch/patchmatch".
```
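
The same check as a non-interactive one-liner (a sketch):

```bash
# exits with an error if PyPatchMatch cannot be imported
python -c "from patchmatch import patch_match; print('patchmatch OK')"
```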

Then run the following three commands:

```sh
pip install xformers~=0.0.22
pip install triton # WON'T WORK ON WINDOWS
python -m xformers.info output
```

If all goes well, you'll see a report like the
following:

```sh
xFormers 0.0.22
memory_efficient_attention.cutlassF: available
memory_efficient_attention.cutlassB: available
memory_efficient_attention.flshattF: available
swiglu.gemm_fused_operand_sum: available
swiglu.fused.p.cpp: available
is_triton_available: True
is_functorch_available: False
pytorch.version: 2.1.0+cu121
pytorch.cuda: available
gpu.compute_capability: 8.9
gpu.name: NVIDIA GeForce RTX 4070
build.info: available
build.cuda_version: 1108
build.python_version: 3.10.11
build.torch_version: 2.1.0+cu121
build.env.TORCH_CUDA_ARCH_LIST: 5.0+PTX 6.0 6.1 7.0 7.5 8.0 8.6
build.env.XFORMERS_BUILD_TYPE: Release
build.env.XFORMERS_ENABLE_DEBUG_ASSERTIONS: None
```

These instructions were written for a system
running Ubuntu 22.04, but other Linux distributions should be able to
adapt this recipe.

#### 1. Install CUDA Toolkit 12.1

You will need the CUDA developer's toolkit in order to compile and
install xFormers. **Do not try to install Ubuntu's nvidia-cuda-toolkit
package.** It is out of date and will cause conflicts among the NVIDIA
driver and binaries. Instead install the CUDA Toolkit package provided
by NVIDIA itself. Go to [CUDA Toolkit 12.1
Downloads](https://developer.nvidia.com/cuda-12-1-0-download-archive)
and use the target selection wizard to choose your platform and Linux
distribution. Select an installer type of "runfile (local)" at the
last step.

This will provide you with a recipe for downloading and running an
install shell script that will install the toolkit and drivers.

#### 2. Confirm/Install pyTorch 2.1.0 with CUDA 12.1 support

If you are using InvokeAI 3.0.2 or higher, these will already be
installed. If not, you can check whether you have the needed libraries

Then run the command:

```sh
python -c 'exec("import torch\nprint(torch.__version__)")'
```

If it prints __2.1.0+cu121__ you're good. If not, you can install the
most up to date libraries with this command:

```sh
# reinstall torch/torchvision built against CUDA 12.1
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu121
```

either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
driver).

## **[Automated Installer (Recommended)](010_INSTALL_AUTOMATED.md)**

✅ This is the recommended installation method for first-time users.

This is a script that will install all of InvokeAI's essential
third party libraries and InvokeAI itself.

🖥️ **Download the latest installer .zip file here**: https://github.com/invoke-ai/InvokeAI/releases/latest

- *Look for the file labelled "InvokeAI-installer-v3.X.X.zip" at the bottom of the page.*
- If you experience issues, read through the full [installation instructions](010_INSTALL_AUTOMATED.md) to make sure you have met all of the installation requirements. If you need more help, join the [Discord](https://discord.gg/invoke-ai) or create an issue on [GitHub](https://github.com/invoke-ai/InvokeAI).

## **[Manual Installation](020_INSTALL_MANUAL.md)**

This method is recommended for experienced users and developers.
and obtaining an access token for downloading. It will then download and
install the weights files for you.

Please look [here](../020_INSTALL_MANUAL.md) for a manual process for doing
the same thing.

7. Start generating images!
To use an alternative model you may invoke the `!switch` command in
the CLI, or pass `--model <model_name>` during `invoke.py` launch for
either the CLI or the Web UI. See [Command Line
Client](../../deprecated/CLI.md#model-selection-and-importation). The
model names are defined in `configs/models.yaml`.

8. Subsequently, to relaunch the script, be sure to run "conda activate

---

These options are described in detail in the
[Command-Line Interface](../../deprecated/CLI.md) documentation.

## Troubleshooting

obtaining an access token for downloading. It will then download and install the
weights files for you.

Please look [here](../020_INSTALL_MANUAL.md) for a manual process for doing the
same thing.

8. Start generating images!

9. Subsequently, to relaunch the script, first activate the Anaconda

**docs/javascripts/init_kapa_widget.js** (new file):

```js
document.addEventListener("DOMContentLoaded", function () {
  var script = document.createElement("script");
  script.src = "https://widget.kapa.ai/kapa-widget.bundle.js";
  script.setAttribute("data-website-id", "b5973bb1-476b-451e-8cf4-98de86745a10");
  script.setAttribute("data-project-name", "Invoke.AI");
  script.setAttribute("data-project-color", "#11213C");
  script.setAttribute("data-project-logo", "https://avatars.githubusercontent.com/u/113954515?s=280&v=4");
  script.async = true;
  document.head.appendChild(script);
});
```
## Features

### Workflow Library

The Workflow Library enables you to save workflows to the Invoke database, allowing you to easily create, modify, and share workflows as needed.

A curated set of workflows is provided by default - these are designed to help explain important nodes' usage in the Workflow Editor.

![workflow_library](../assets/nodes/workflow_library.png)

### Linear View

The Workflow Editor allows you to create a UI for your workflow, to make it easier to iterate on your generations.

To add an input to the Linear UI, right click on the **input label** and select "Add to Linear View".

The Linear UI View will also be part of the saved workflow, allowing you to share workflows and enable others to use them, regardless of complexity.

Nodes have a "Use Cache" option in their footer. This allows for performance improvements by using the previously cached values during the workflow processing.
|
Nodes have a "Use Cache" option in their footer. This allows for performance improvements by using the previously cached values during the workflow processing.
|
||||||
|
|
||||||
|
|
||||||
## Important Concepts
|
## Important Nodes & Concepts
|
||||||
|
|
||||||
There are several node grouping concepts that can be examined with a narrow focus. These (and other) groupings can be pieced together to make up functional graph setups, and are important to understanding how groups of nodes work together as part of a whole. Note that the screenshots below aren't examples of complete functioning node graphs (see Examples).

It is common to want to use both the same seed (for continuity) and random seeds (for variety). To define a seed, simply enter it into the 'Seed' field on a noise node. Conversely, the RandomInt node generates a random integer between 'Low' and 'High', and can be used as input to the 'Seed' edge point on a noise node to randomize your seed.

![groupsrandseed](../assets/nodes/groupsrandseed.png)

### ControlNet

These are nodes that have been developed by the community, for the community.

If you'd like to submit a node for the community, please refer to the [node creation overview](contributingNodes.md).

To use a node, add the node to the `nodes` folder found in your InvokeAI install location.

The suggested method is to use `git clone` to clone the repository the node is found in. This allows for easy updates of the node in the future; see the sketch below.

If you'd prefer, you can also just download the whole node folder from the linked repository and add it to the `nodes` folder.

To use a community workflow, download the `.json` node graph file and load it into Invoke AI via the **Load Workflow** button in the Workflow Editor.
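
For example, using one of the node packs linked below (a sketch; it assumes the default root location `~/invokeai`):

```bash
# clone a community node pack into the InvokeAI `nodes` folder
cd ~/invokeai/nodes
git clone https://github.com/skunkworxdark/autostereogram_nodes
# update it later with: cd autostereogram_nodes && git pull
```
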
- Community Nodes
    + [Adapters-Linked](#adapters-linked-nodes)
    + [Autostereogram](#autostereogram-nodes)
    + [Average Images](#average-images)
    + [Clean Image Artifacts After Cut](#clean-image-artifacts-after-cut)
    + [Close Color Mask](#close-color-mask)
    + [Clothing Mask](#clothing-mask)
    + [Contrast Limited Adaptive Histogram Equalization](#contrast-limited-adaptive-histogram-equalization)
    + [Depth Map from Wavefront OBJ](#depth-map-from-wavefront-obj)
    + [Film Grain](#film-grain)
    + [Generative Grammar-Based Prompt Nodes](#generative-grammar-based-prompt-nodes)
    + [GPT2RandomPromptMaker](#gpt2randompromptmaker)
    + [Grid to Gif](#grid-to-gif)
    + [Halftone](#halftone)
    + [Hand Refiner with MeshGraphormer](#hand-refiner-with-meshgraphormer)
    + [Image and Mask Composition Pack](#image-and-mask-composition-pack)
    + [Image Dominant Color](#image-dominant-color)
    + [Image to Character Art Image Nodes](#image-to-character-art-image-nodes)
    + [Image Picker](#image-picker)
    + [Image Resize Plus](#image-resize-plus)
    + [Latent Upscale](#latent-upscale)
    + [Load Video Frame](#load-video-frame)
    + [Make 3D](#make-3d)
    + [Mask Operations](#mask-operations)
    + [Match Histogram](#match-histogram)
    + [Metadata-Linked](#metadata-linked-nodes)
    + [Negative Image](#negative-image)
    + [Nightmare Promptgen](#nightmare-promptgen)
    + [Oobabooga](#oobabooga)
    + [Prompt Tools](#prompt-tools)
    + [Remote Image](#remote-image)
    + [BriaAI Background Remove](#briaai-remove-background)
    + [Remove Background](#remove-background)
    + [Retroize](#retroize)
    + [Size Stepper Nodes](#size-stepper-nodes)
    + [Simple Skin Detection](#simple-skin-detection)
    + [Text font to Image](#text-font-to-image)
    + [Thresholding](#thresholding)
    + [Unsharp Mask](#unsharp-mask)
    + [XY Image to Grid and Images to Grids nodes](#xy-image-to-grid-and-images-to-grids-nodes)
- [Example Node Template](#example-node-template)
- [Disclaimer](#disclaimer)
- [Help](#help)

--------------------------------
### Make 3D
|
### Adapters Linked Nodes

**Description:** A set of nodes for linked adapters (ControlNet, IP-Adapter & T2I-Adapter). This allows multiple adapters to be chained together without using a `collect` node, which means it can be used inside an `iterate` node without the issues of collecting on every iteration.

- `ControlNet-Linked` - Collects ControlNet info to pass to other nodes.
- `IP-Adapter-Linked` - Collects IP-Adapter info to pass to other nodes.
- `T2I-Adapter-Linked` - Collects T2I-Adapter info to pass to other nodes.

Note: These are inherited from the core nodes, so any update to the core nodes should be reflected in these.

**Node Link:** https://github.com/skunkworxdark/adapters-linked-nodes

--------------------------------

### Autostereogram Nodes

**Description:** Generate autostereogram images from a depth map. This is not a very practically useful node, but more a 90s nostalgic indulgence, as I used to love these images as a kid.

**Node Link:** https://github.com/skunkworxdark/autostereogram_nodes

**Example Usage:**
</br>
<img src="https://github.com/skunkworxdark/autostereogram_nodes/blob/main/images/spider.png" width="200" /> -> <img src="https://github.com/skunkworxdark/autostereogram_nodes/blob/main/images/spider-depth.png" width="200" /> -> <img src="https://github.com/skunkworxdark/autostereogram_nodes/raw/main/images/spider-dots.png" width="200" /> <img src="https://github.com/skunkworxdark/autostereogram_nodes/raw/main/images/spider-pattern.png" width="200" />

--------------------------------

### Average Images

**Description:** This node takes in a collection of images of the same size and averages them as output. It converts everything to RGB mode first.

**Node Link:** https://github.com/JPPhoto/average-images-node
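
For intuition, the averaging step can be sketched in a few lines of Pillow/numpy (an illustration, not the node's actual code):

```python
# Minimal sketch of image averaging: convert to RGB, average pixel-wise in float.
import numpy as np
from PIL import Image

def average_images(paths):
    # Same-size images are required; stack shape is (N, H, W, 3).
    stack = np.stack([np.asarray(Image.open(p).convert("RGB"), dtype=np.float64)
                      for p in paths])
    mean = stack.mean(axis=0)
    return Image.fromarray(mean.astype(np.uint8))
```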

--------------------------------

### Clean Image Artifacts After Cut

Description: Removes residual artifacts after an image is separated from its background.

Node Link: https://github.com/VeyDlin/clean-artifact-after-cut-node

View:
</br><img src="https://raw.githubusercontent.com/VeyDlin/clean-artifact-after-cut-node/master/.readme/node.png" width="500" />

--------------------------------

### Close Color Mask

Description: Generates a mask for images based on a closely matching color, useful for color-based selections.

Node Link: https://github.com/VeyDlin/close-color-mask-node

View:
</br><img src="https://raw.githubusercontent.com/VeyDlin/close-color-mask-node/master/.readme/node.png" width="500" />

--------------------------------

### Clothing Mask

Description: Employs a U2NET neural network trained for the segmentation of clothing items in images.

Node Link: https://github.com/VeyDlin/clothing-mask-node

View:
</br><img src="https://raw.githubusercontent.com/VeyDlin/clothing-mask-node/master/.readme/node.png" width="500" />

--------------------------------

### Contrast Limited Adaptive Histogram Equalization

Description: Enhances local image contrast using adaptive histogram equalization with contrast limiting.

Node Link: https://github.com/VeyDlin/clahe-node

View:
</br><img src="https://raw.githubusercontent.com/VeyDlin/clahe-node/master/.readme/node.png" width="500" />

--------------------------------

### Depth Map from Wavefront OBJ

**Description:** Render depth maps from Wavefront .obj files (triangulated) using this simple 3D renderer utilizing numpy and matplotlib to compute and color the scene. There are simple parameters to change the FOV, camera position, and model orientation.

To be imported, an .obj must use triangulated meshes, so make sure to enable that option if exporting from a 3D modeling program. This renderer makes each triangle a solid color based on its average depth, so it will cause anomalies if your .obj has large triangles. In Blender, the Remesh modifier can be helpful to subdivide a mesh into small pieces that work well given these limitations.

**Node Link:** https://github.com/dwringer/depth-from-obj-node

**Example Usage:**
</br><img src="https://raw.githubusercontent.com/dwringer/depth-from-obj-node/main/depth_from_obj_usage.jpg" width="500" />

--------------------------------

### Film Grain

**Node Link:** https://github.com/JPPhoto/film-grain-node

--------------------------------

### Generative Grammar-Based Prompt Nodes

**Description:** This set of 3 nodes generates prompts from simple user-defined grammar rules (loaded from custom files - examples provided below). The prompts are made by recursively expanding a special template string, replacing nonterminal "parts-of-speech" until no nonterminal terms remain in the string.

This includes 3 Nodes:

- *Lookup Table from File* - loads a YAML file "prompt" section (or of a whole folder of YAMLs) into a JSON-ified dictionary (Lookups output)
- *Lookups Entry from Prompt* - places a single entry in a new Lookups output under the specified heading
- *Prompt from Lookup Table* - uses a Collection of Lookups as grammar rules from which to randomly generate prompts.

**Node Link:** https://github.com/dwringer/generative-grammar-prompt-nodes

**Example Usage:**
</br><img src="https://raw.githubusercontent.com/dwringer/generative-grammar-prompt-nodes/main/lookuptables_usage.jpg" width="500" />

--------------------------------

### GPT2RandomPromptMaker

Generated Prompt: An enchanted weapon will be usable by any character regardless of their alignment.

<img src="https://github.com/mickr777/InvokeAI/assets/115216705/8496ba09-bcdd-4ff7-8076-ff213b6a1e4c" width="200" />

--------------------------------

### Grid to Gif

**Description:** One node that turns a grid image into an image collection, one node that turns an image collection into a gif.

**Node Link:** https://github.com/mildmisery/invokeai-GridToGifNode/blob/main/GridToGif.py

**Example Node Graph:** https://github.com/mildmisery/invokeai-GridToGifNode/blob/main/Grid%20to%20Gif%20Example%20Workflow.json

**Output Examples**

<img src="https://raw.githubusercontent.com/mildmisery/invokeai-GridToGifNode/main/input.png" width="300" />
<img src="https://raw.githubusercontent.com/mildmisery/invokeai-GridToGifNode/main/output.gif" width="300" />

--------------------------------

### Halftone

**Description**: Halftone converts the source image to grayscale and then performs halftoning. CMYK Halftone converts the image to CMYK and applies a per-channel halftoning to make the source image look like a magazine or newspaper. For both nodes, you can specify angles and halftone dot spacing.

**Node Link:** https://github.com/JPPhoto/halftone-node

**Example**

Input:

<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/fd5efb9f-4355-4409-a1c2-c1ca99e0cab4" width="300" />

Halftone Output:

<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/7e606f29-e68f-4d46-b3d5-97f799a4ec2f" width="300" />

CMYK Halftone Output:
<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/c59c578f-db8e-4d66-8c66-2851752d75ea" width="300" />

--------------------------------

### Hand Refiner with MeshGraphormer

**Description**: Hand Refiner takes in your image and automatically generates a fixed depth map for the hands, along with a mask of the hands region, which conveniently allows you to use them together with ControlNet to fix the wonky hands generated by Stable Diffusion.

**Node Link:** https://github.com/blessedcoolant/invoke_meshgraphormer

**View**
<img src="https://raw.githubusercontent.com/blessedcoolant/invoke_meshgraphormer/main/assets/preview.jpg" />

--------------------------------

### Image and Mask Composition Pack

**Description:** This is a pack of nodes for composing masks and images, including a simple text mask creator and both image and latent offset nodes. The offsets wrap around, so these can be used in conjunction with the Seamless node to progressively generate centered on different parts of the seamless tiling.

**Node Link:** https://github.com/dwringer/composition-nodes

**Nodes and Output Examples:**
</br><img src="https://raw.githubusercontent.com/dwringer/composition-nodes/main/composition_pack_overview.jpg" width="500" />

--------------------------------

### Image Dominant Color

Description: Identifies and extracts the dominant color from an image using k-means clustering.

Node Link: https://github.com/VeyDlin/image-dominant-color-node

View:
</br><img src="https://raw.githubusercontent.com/VeyDlin/image-dominant-color-node/master/.readme/node.png" width="500" />

--------------------------------

### Image to Character Art Image Nodes

**Description:** Group of nodes to convert an input image into an ASCII/Unicode art image

**Node Link:** https://github.com/mickr777/imagetoasciiimage

**Output Examples**

<img src="https://user-images.githubusercontent.com/115216705/271817646-8e061fcc-9a2c-4fa9-bcc7-c0f7b01e9056.png" width="300" /><img src="https://github.com/mickr777/imagetoasciiimage/assets/115216705/3c4990eb-2f42-46b9-90f9-0088b939dc6a" width="300" /></br>
<img src="https://github.com/mickr777/imagetoasciiimage/assets/115216705/fee7f800-a4a8-41e2-a66b-c66e4343307e" width="300" />
<img src="https://github.com/mickr777/imagetoasciiimage/assets/115216705/1d9c1003-a45f-45c2-aac7-46470bb89330" width="300" />

--------------------------------

### Image Picker

**Description:** This InvokeAI node takes in a collection of images and randomly chooses one. This can be useful when you have a number of poses to choose from for a ControlNet node, or a number of input images for another purpose.

**Node Link:** https://github.com/JPPhoto/image-picker-node

--------------------------------

### Image Resize Plus

Description: Provides various image resizing options such as fill, stretch, fit, center, and crop.

Node Link: https://github.com/VeyDlin/image-resize-plus-node

View:
</br><img src="https://raw.githubusercontent.com/VeyDlin/image-resize-plus-node/master/.readme/node.png" width="500" />

--------------------------------

### Latent Upscale

**Description:** This node uses a small (~2.4mb) model to upscale the latents used in a Stable Diffusion 1.5 or Stable Diffusion XL image generation, rather than the typical interpolation method, avoiding the traditional downsides of the latent upscale technique.

**Node Link:** [https://github.com/gogurtenjoyer/latent-upscale](https://github.com/gogurtenjoyer/latent-upscale)

--------------------------------

### Load Video Frame

**Description:** This is a video frame image provider + indexer/video creation nodes for hooking up to iterators and ranges and ControlNets and such for InvokeAI node experimentation. Think animation + ControlNet outputs.

**Node Link:** https://github.com/helix4u/load_video_frame

**Output Example:**
<img src="https://raw.githubusercontent.com/helix4u/load_video_frame/main/_git_assets/testmp4_embed_converted.gif" width="500" />

--------------------------------

### Make 3D

**Description:** Create compelling 3D stereo images from 2D originals.

**Node Link:** [https://gitlab.com/srcrr/shift3d/-/raw/main/make3d.py](https://gitlab.com/srcrr/shift3d)

**Example Node Graph:** https://gitlab.com/srcrr/shift3d/-/raw/main/example-workflow.json?ref_type=heads&inline=false

**Output Examples**

<img src="https://gitlab.com/srcrr/shift3d/-/raw/main/example-1.png" width="300" />
<img src="https://gitlab.com/srcrr/shift3d/-/raw/main/example-2.png" width="300" />

--------------------------------

### Mask Operations

Description: Offers logical operations (OR, SUB, AND) for combining and manipulating image masks.

Node Link: https://github.com/VeyDlin/mask-operations-node

View:
</br><img src="https://raw.githubusercontent.com/VeyDlin/mask-operations-node/master/.readme/node.png" width="500" />

--------------------------------

### Match Histogram

**Description:** An InvokeAI node to match a histogram from one image to another. This is a bit like the `color correct` node in the main InvokeAI, but it works in the YCbCr colourspace and can handle images of different sizes. It also does not require a mask input.

- Option to only transfer luminance channel.
- Option to save output as grayscale

A good use case for this node is to normalize the colors of an image that has been through the tiled scaling workflow of my XYGrid Nodes.

See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/main/README.md

**Node Link:** https://github.com/skunkworxdark/match_histogram

**Output Examples**
<img src="https://github.com/skunkworxdark/match_histogram/assets/21961335/ed12f329-a0ef-444a-9bae-129ed60d6097" width="300" />

--------------------------------

### Metadata Linked Nodes

**Description:** A set of nodes for Metadata. Collect Metadata from within an `iterate` node & extract metadata from an image.

- `Metadata Item Linked` - Allows collecting of metadata while within an iterate node with no need for a collect node or conversion to metadata node.
- `Metadata From Image` - Provides Metadata from an image.
- `Metadata To String` - Extracts a String value of a label from metadata.
- `Metadata To Integer` - Extracts an Integer value of a label from metadata.
- `Metadata To Float` - Extracts a Float value of a label from metadata.
- `Metadata To Scheduler` - Extracts a Scheduler value of a label from metadata.

**Node Link:** https://github.com/skunkworxdark/metadata-linked-nodes

--------------------------------

### Negative Image

Description: Creates a negative version of an image, effective for visual effects and mask inversion.

Node Link: https://github.com/VeyDlin/negative-image-node

View:
</br><img src="https://raw.githubusercontent.com/VeyDlin/negative-image-node/master/.readme/node.png" width="500" />

--------------------------------

### Nightmare Promptgen

**Description:** Nightmare Prompt Generator - Uses a local text generation model to create unique imaginative (but usually nightmarish) prompts for InvokeAI. By default, it allows you to choose from some gpt-neo models I finetuned on over 2500 of my own InvokeAI prompts in Compel format, but you're able to add your own, as well. Offers support for replacing any troublesome words with a random choice from a list you can also define.

**Node Link:** [https://github.com/gogurtenjoyer/nightmare-promptgen](https://github.com/gogurtenjoyer/nightmare-promptgen)

--------------------------------

### Oobabooga

**Description:** Asks a local LLM running in Oobabooga's Text-Generation-Webui to write a prompt based on the user input.

**Link:** https://github.com/sammyf/oobabooga-node

**Example:**

"describe a new mystical creature in its natural environment"

*can return*

"The mystical creature I am describing to you is called the "Glimmerwing". It is a majestic, iridescent being that inhabits the depths of the most enchanted forests and glimmering lakes. Its body is covered in shimmering scales that reflect every color of the rainbow, and it has delicate, translucent wings that sparkle like diamonds in the sunlight. The Glimmerwing's home is a crystal-clear lake, surrounded by towering trees with leaves that shimmer like jewels. In this serene environment, the Glimmerwing spends its days swimming gracefully through the water, chasing schools of glittering fish and playing with the gentle ripples of the lake's surface.

As the sun sets, the Glimmerwing perches on a branch of one of the trees, spreading its wings to catch the last rays of light. The creature's scales glow softly, casting a rainbow of colors across the forest floor. The Glimmerwing sings a haunting melody, its voice echoing through the stillness of the night air. Its song is said to have the power to heal the sick and bring peace to troubled souls. Those who are lucky enough to hear the Glimmerwing's song are forever changed by its beauty and grace."

<img src="https://github.com/sammyf/oobabooga-node/assets/42468608/cecdd820-93dd-4c35-abbf-607e001fb2ed" width="300" />

**Requirement**

A Text-Generation-Webui instance (might work remotely too, but I never tried it) and obviously InvokeAI 3.x

**Note**

This node works best with SDXL models, especially as the style can be described independently of the LLM's output.

--------------------------------

### Prompt Tools

**Description:** A set of InvokeAI nodes that add general prompt (string) manipulation tools. Designed to accompany the `Prompts From File` node and other prompt generation nodes.

1. `Prompt To File` - saves a prompt or collection of prompts to a file, one per line. There is an append/overwrite option.
2. `PTFields Collect` - Converts image generation fields into a JSON format string that can be passed to Prompt To File.
3. `PTFields Expand` - Takes a JSON string and converts it to individual generation parameters. This can be fed from the Prompt To File node.
4. `Prompt Strength` - Formats a prompt with a strength, like the weighted format of compel (see the sketch after this list).
5. `Prompt Strength Combine` - Combines weighted prompts for .and()/.blend()
6. `CSV To Index String` - Gets a string from a CSV by index. Includes a Random index option
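
As a rough illustration of what the two strength-related nodes above produce (the exact output format is documented in the repo; the compel-style syntax shown here is an assumption):

```python
# Hypothetical sketch of the formatting performed by `Prompt Strength` and
# `Prompt Strength Combine` (compel-style weighting; exact syntax may differ).
def prompt_strength(prompt: str, strength: float) -> str:
    return f"({prompt}){strength}"          # e.g. "(a ruined castle)1.2"

def prompt_strength_combine(weighted: list[str], mode: str = "blend") -> str:
    joined = '", "'.join(weighted)
    return f'("{joined}").{mode}()'         # for a .and()/.blend() prompt node

parts = [prompt_strength("a ruined castle", 1.2), prompt_strength("thick fog", 0.8)]
print(prompt_strength_combine(parts))
# ("(a ruined castle)1.2", "(thick fog)0.8").blend()
```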

The following nodes are now included in v3.2 of Invoke and are no longer in this set of tools.<br>
- `Prompt Join` -> `String Join`
- `Prompt Join Three` -> `String Join Three`
- `Prompt Replace` -> `String Replace`
- `Prompt Split Neg` -> `String Split Neg`

See full docs here: https://github.com/skunkworxdark/Prompt-tools-nodes/edit/main/README.md

**Node Link:** https://github.com/skunkworxdark/Prompt-tools-nodes

**Workflow Examples**

<img src="https://github.com/skunkworxdark/prompt-tools/blob/main/images/CSVToIndexStringNode.png" width="300" />

--------------------------------

### Remote Image

**Description:** This is a pack of nodes to interoperate with other services, be they public websites or bespoke local servers. The pack consists of these nodes:

- *Load Remote Image* - Lets you load remote images such as a realtime webcam image, an image of the day, or dynamically created images.
- *Post Image to Remote Server* - Lets you upload an image to a remote server using an HTTP POST request, e.g. for storage, display or further processing.

**Node Link:** https://github.com/fieldOfView/InvokeAI-remote_image
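
Under the hood this is plain HTTP; a sketch of both directions with `requests` (the URLs are hypothetical, and this is not the pack's code):

```python
# Fetch a remote image and POST one back out (hypothetical endpoints).
import io
import requests
from PIL import Image

resp = requests.get("https://example.com/webcam/latest.jpg", timeout=10)
img = Image.open(io.BytesIO(resp.content))          # load remote image

buf = io.BytesIO()
img.save(buf, format="PNG")
requests.post("https://example.com/upload",         # post image to remote server
              files={"file": ("result.png", buf.getvalue(), "image/png")},
              timeout=10)
```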

--------------------------------

### BriaAI Remove Background

**Description**: Implements one-click background removal with BriaAI's new version 1.4 model, which seems to be producing better results than any previous background removal tool.

**Node Link:** https://github.com/blessedcoolant/invoke_bria_rmbg

**View**
<img src="https://raw.githubusercontent.com/blessedcoolant/invoke_bria_rmbg/main/assets/preview.jpg" />

--------------------------------

### Remove Background

Description: An integration of the rembg package to remove backgrounds from images using multiple U2NET models.

Node Link: https://github.com/VeyDlin/remove-background-node

View:
</br><img src="https://raw.githubusercontent.com/VeyDlin/remove-background-node/master/.readme/node.png" width="500" />

--------------------------------

### Retroize

**Description:** Retroize is a collection of nodes for InvokeAI to "Retroize" images. Any image can be given a fresh coat of retro paint with these nodes, either from your gallery or from within the graph itself. It includes nodes to pixelize, quantize, palettize, and ditherize images; as well as to retrieve palettes from existing images.

**Node Link:** https://github.com/Ar7ific1al/invokeai-retroizeinode/

**Retroize Output Examples**
<img src="https://github.com/Ar7ific1al/InvokeAI_nodes_retroize/assets/2306586/de8b4fa6-324c-4c2d-b36c-297600c73974" width="500" />

--------------------------------

### Simple Skin Detection

Description: Detects skin in images based on predefined color thresholds.

Node Link: https://github.com/VeyDlin/simple-skin-detection-node

View:
</br><img src="https://raw.githubusercontent.com/VeyDlin/simple-skin-detection-node/master/.readme/node.png" width="500" />

--------------------------------

### Size Stepper Nodes

**Node Link:** https://github.com/dwringer/size-stepper-nodes

**Example Usage:**
</br><img src="https://raw.githubusercontent.com/dwringer/size-stepper-nodes/main/size_nodes_usage.jpg" width="500" />

--------------------------------

### Text font to Image

**Description:** Text font to text image node for InvokeAI. Downloads a font to use (or, if it is in the font cache, uses it from there); the text is always resized to the image size, but you can control that with padding. An optional 2nd line is supported.

**Output Examples**

<img src="https://github.com/mickr777/InvokeAI/assets/115216705/c21b0af3-d9c6-4c16-9152-846a23effd36" width="300" />

Results after using the depth controlnet

<img src="https://github.com/mickr777/InvokeAI/assets/115216705/915f1a53-968e-43eb-aa61-07cd8f1a733a" width="300" />
<img src="https://github.com/mickr777/InvokeAI/assets/115216705/821ef89e-8a60-44f5-b94e-471a9d8690cc" width="300" />
<img src="https://github.com/mickr777/InvokeAI/assets/115216705/2befcb6d-49f4-4bfd-b5fc-1fee19274f89" width="300" />

--------------------------------

### Thresholding

**Description:** This node generates masks for highlights, midtones, and shadows given an input image. You can optionally specify a blur for the lookup table used in making those masks from the source image.

**Node Link:** https://github.com/JPPhoto/thresholding-node

**Examples**

Input:

<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/c88ada13-fb3d-484c-a4fe-947b44712632" width="300" />

Highlights/Midtones/Shadows:

<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/727021c1-36ff-4ec8-90c8-105e00de986d" width="300" />
<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/0b721bfc-f051-404e-b905-2f16b824ddfe" width="300" />
<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/04c1297f-1c88-42b6-a7df-dd090b976286" width="300" />

Highlights/Midtones/Shadows (with LUT blur enabled):

<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/19aa718a-70c1-4668-8169-d68f4bd13771" width="300" />
<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/0a440e43-697f-4d17-82ee-f287467df0a5" width="300" />
<img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/0701fd0f-2ca7-4fe2-8613-2b52547bafce" width="300" />

--------------------------------

### Unsharp Mask

**Description:** Applies an unsharp mask filter to an image, preserving its alpha channel in the process.

**Node Link:** https://github.com/JPPhoto/unsharp-mask-node
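
Unsharp masking adds back the difference between the image and a blurred copy; a sketch that also keeps the alpha channel untouched (an illustration, not the node's code):

```python
# Unsharp mask: out = img + amount * (img - blur(img)), alpha preserved.
import numpy as np
from PIL import Image, ImageFilter

img = Image.open("input.png").convert("RGBA")
rgb = img.convert("RGB")
blurred = rgb.filter(ImageFilter.GaussianBlur(radius=2))

a = np.asarray(rgb, dtype=np.float32)
b = np.asarray(blurred, dtype=np.float32)
sharp = np.clip(a + 1.0 * (a - b), 0, 255).astype(np.uint8)   # amount = 1.0

out = Image.fromarray(sharp).convert("RGBA")
out.putalpha(img.getchannel("A"))                             # restore alpha
out.save("sharpened.png")
```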

--------------------------------

### XY Image to Grid and Images to Grids nodes

**Description:** These nodes add the following to InvokeAI:

- Generate grids of images from multiple input images
- Create XY grid images with labels from parameters
- Split images into overlapping tiles for processing (for super-resolution workflows)
- Recombine image tiles into a single output image blending the seams

The nodes include:

1. `Images To Grids` - Combine multiple images into a grid of images
2. `XYImage To Grid` - Takes X & Y params and creates a labeled image grid.
3. `XYImage Tiles` - Super-resolution (embiggen) style tiled resizing
4. `Image To XYImages` - Takes an image and cuts it up into a number of columns and rows.
5. Multiple supporting nodes - Helper nodes for data wrangling and building `XYImage` collections

See full docs here: https://github.com/skunkworxdark/XYGrid_nodes/edit/main/README.md

**Node Link:** https://github.com/skunkworxdark/XYGrid_nodes
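
The tile split/recombine idea above: cut the image into overlapping tiles, process each tile, then blend the overlaps back together. A simplified sketch of the splitting step (the recombination blend is more involved; this is an illustration, not the nodes' code):

```python
# Split an image into overlapping tiles for per-tile processing.
from PIL import Image

def split_tiles(img, tile=512, overlap=64):
    step = tile - overlap
    boxes = []
    for top in range(0, max(img.height - overlap, 1), step):
        for left in range(0, max(img.width - overlap, 1), step):
            # Edge tiles may be smaller than the nominal tile size.
            box = (left, top, min(left + tile, img.width),
                   min(top + tile, img.height))
            boxes.append(box)
    return [(box, img.crop(box)) for box in boxes]

tiles = split_tiles(Image.open("big.png"))
print(len(tiles), "tiles")
```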

**Output Examples**

<img src="https://github.com/skunkworxdark/XYGrid_nodes/blob/main/images/collage.png" width="300" />

--------------------------------

### Example Node Template

**Description:** This node allows you to do super cool things with InvokeAI.

**Node Link:** https://github.com/invoke-ai/InvokeAI/blob/main/invokeai/app/invocations/prompt.py

**Example Workflow:** https://github.com/invoke-ai/InvokeAI/blob/docs/main/docs/workflows/Prompt_from_File.json

**Output Examples**

</br><img src="https://invoke-ai.github.io/InvokeAI/assets/invoke_ai_banner.png" width="500" />

## Disclaimer

Once you’ve created a node and confirmed that it behaves as expected locally, follow these steps:

- Make sure the node is contained in a new Python (.py) file. Preferably, the node is in a repo with a README detailing the node's usage & examples to help others more easily use your node. Including the tag "invokeai-node" in your repository's README can also help other users find it more easily.
- Submit a pull request with a link to your node(s) repo in GitHub against the `main` branch to add the node to the [Community Nodes](communityNodes.md) list
- Make sure you are following the template below and have provided all relevant details about the node and what it does. Example output images and workflows are very helpful for other users looking to use your node.
- A maintainer will review the pull request and node. If the node is aligned with the direction of the project, you may be asked for permission to include it in the core project.

# List of Default Nodes

The table below contains a list of the default nodes shipped with InvokeAI and their descriptions.

| Node <img width=160 align="right"> | Function |
| :--------------------------------- | :------- |
| Add Integers | Adds two numbers |
| Boolean Primitive Collection | A collection of boolean primitive values |
| Boolean Primitive | A boolean primitive value |
| Canny Processor | Canny edge detection for ControlNet |
| CenterPadCrop | Pad or crop an image's sides from the center by specified pixels. Positive values are outside of the image. |
| CLIP Skip | Skip layers in clip text_encoder model. |
| Collect | Collects values into a collection |
| Color Correct | Shifts the colors of a target image to match the reference image, optionally using a mask to only color-correct certain regions of the target image. |
| Integer Math | Perform basic math operations on two integers |
| Convert Image Mode | Converts an image to a different mode. |
| Crop Image | Crops an image to a specified box. The box can be outside of the image. |
| Ideal Size | Calculates an ideal image size for latents for a first pass of a multi-pass upscaling to avoid duplication and other artifacts |
| Image Hue Adjustment | Adjusts the Hue of an image. |
| Inverse Lerp Image | Inverse linear interpolation of all pixels of an image |
| Image Primitive | An image primitive value |
| Noise | Generates latent noise. |
| Normal BAE Processor | Applies NormalBae processing to image |
| ONNX Latents to Image | Generates an image from latents. |
| ONNX Prompt (Raw) | A node to process inputs and produce outputs. May use dependency injection in `__init__` to receive providers. |
| ONNX Text to Latents | Generates latents from conditionings. |
| ONNX Model Loader | Loads a main model, outputting its submodels. |
| OpenCV Inpaint | Simple inpaint using opencv. |
| DW Openpose Processor | Applies Openpose processing to image |
| PIDI Processor | Applies PIDI processing to image |
| Prompts from File | Loads prompts from a text file |
| Random Integer | Outputs a single random integer. |

# Example Workflows

We've curated some example workflows for you to get started with Workflows in InvokeAI! These can also be found in the Workflow Library, located in the Workflow Editor of Invoke.

To use them, right click on your desired workflow, follow the link to GitHub and click the "⬇" button to download the raw file. You can then use the "Load Workflow" functionality in InvokeAI to load the workflow and start generating images!

If you're interested in finding more workflows, check out the [#share-your-workflows](https://discord.com/channels/1020123559063990373/1130291608097661000) channel in the InvokeAI Discord.

* [SD1.5 / SD2 Text to Image](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/Text_to_Image.json)
* [SDXL Text to Image](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/SDXL_Text_to_Image.json)
* [SDXL Text to Image with Refiner](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/SDXL_w_Refiner_Text_to_Image.json)
* [Multi ControlNet (Canny & Depth)](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/Multi_ControlNet_Canny_and_Depth.json)
* [Tiled Upscaling with ControlNet](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/ESRGAN_img2img_upscale_w_Canny_ControlNet.json)
* [Prompt From File](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/Prompt_from_File.json)
* [Face Detailer with IP-Adapter & ControlNet](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/Face_Detailer_with_IP-Adapter_and_Canny.json)
* [FaceMask](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/FaceMask.json)
* [FaceOff with 2x Face Scaling](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/FaceOff_FaceScale2x.json)
* [QR Code Monster](https://github.com/invoke-ai/InvokeAI/blob/main/docs/workflows/QR_Code_Monster.json)
@ -13,46 +13,69 @@ We thank them for all of their time and hard work.
|
|||||||
|
|
||||||
- [Lincoln D. Stein](mailto:lincoln.stein@gmail.com)
|
- [Lincoln D. Stein](mailto:lincoln.stein@gmail.com)
|
||||||
|
|
||||||
## **Current core team**
|
## **Current Core Team**
|
||||||
|
|
||||||
* @lstein (Lincoln Stein) - Co-maintainer
|
* @lstein (Lincoln Stein) - Co-maintainer
|
||||||
* @blessedcoolant - Co-maintainer
|
* @blessedcoolant - Co-maintainer
|
||||||
* @hipsterusername (Kent Keirsey) - Co-maintainer, CEO, Positive Vibes
|
* @hipsterusername (Kent Keirsey) - Co-maintainer, CEO, Positive Vibes
|
||||||
* @psychedelicious (Spencer Mabrito) - Web Team Leader
|
* @psychedelicious (Spencer Mabrito) - Web Team Leader
|
||||||
* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
|
* @chainchompa (Jennifer Player) - Web Development & Chain-Chomping
|
||||||
* @damian0815 - Attention Systems and Compel Maintainer
|
* @josh is toast (Josh Corbett) - Web Development
|
||||||
* @ebr (Eugene Brodsky) - Cloud/DevOps/Sofware engineer; your friendly neighbourhood cluster-autoscaler
|
|
||||||
* @genomancer (Gregg Helt) - Controlnet support
|
|
||||||
* @StAlKeR7779 (Sergey Borisov) - Torch stack, ONNX, model management, optimization
|
|
||||||
* @cheerio (Mary Rogers) - Lead Engineer & Web App Development
|
* @cheerio (Mary Rogers) - Lead Engineer & Web App Development
|
||||||
|
* @ebr (Eugene Brodsky) - Cloud/DevOps/Sofware engineer; your friendly neighbourhood cluster-autoscaler
|
||||||
|
* @sunija - Standalone version
|
||||||
|
* @genomancer (Gregg Helt) - Controlnet support
|
||||||
* @brandon (Brandon Rising) - Platform, Infrastructure, Backend Systems
|
* @brandon (Brandon Rising) - Platform, Infrastructure, Backend Systems
|
||||||
* @ryanjdick (Ryan Dick) - Machine Learning & Training
|
* @ryanjdick (Ryan Dick) - Machine Learning & Training
|
||||||
* @millu (Millun Atluri) - Community Manager, Documentation, Node-wrangler
|
* @JPPhoto - Core image generation nodes
|
||||||
* @chainchompa (Jennifer Player) - Web Development & Chain-Chomping
|
* @dunkeroni - Image generation backend
|
||||||
|
* @SkunkWorxDark - Image generation backend
|
||||||
* @keturn (Kevin Turner) - Diffusers
|
* @keturn (Kevin Turner) - Diffusers
|
||||||
|
* @millu (Millun Atluri) - Community Wizard, Documentation, Node-wrangler,
|
||||||
|
* @glimmerleaf (Devon Hopkins) - Community Wizard
|
||||||
* @gogurt enjoyer - Discord moderator and end user support
|
* @gogurt enjoyer - Discord moderator and end user support
|
||||||
* @whosawhatsis - Discord moderator and end user support
|
* @whosawhatsis - Discord moderator and end user support
|
||||||
* @dwinrger - Discord moderator and end user support
|
* @dwinrger - Discord moderator and end user support
|
||||||
* @526christian - Discord moderator and end user support
|
* @526christian - Discord moderator and end user support
|
||||||
|
* @harvester62 - Discord moderator and end user support
|
||||||
|
|
||||||
|
|
||||||
|
## **Honored Team Alumni**
|
||||||
|
|
||||||
|
* @StAlKeR7779 (Sergey Borisov) - Torch stack, ONNX, model management, optimization
|
||||||
|
* @damian0815 - Attention Systems and Compel Maintainer
|
||||||
|
* @netsvetaev (Artur) - Localization support
|
||||||
|
* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
|
||||||
|
* @tildebyte - Installation and configuration
|
||||||
|
* @mauwii (Matthias Wilde) - Installation, release, continuous integration
|
||||||
|
|
||||||
|
|
||||||
## **Full List of Contributors by Commit Name**
|
## **Full List of Contributors by Commit Name**
|
||||||
|
|
||||||
|
- 이승석
|
||||||
- AbdBarho
|
- AbdBarho
|
||||||
- ablattmann
|
- ablattmann
|
||||||
- AdamOStark
|
- AdamOStark
|
||||||
- Adam Rice
|
- Adam Rice
|
||||||
- Airton Silva
|
- Airton Silva
|
||||||
|
- Aldo Hoeben
|
||||||
- Alexander Eichhorn
|
- Alexander Eichhorn
|
||||||
- Alexandre D. Roberge
|
- Alexandre D. Roberge
|
||||||
|
- Alexandre Macabies
|
||||||
|
- Alfie John
|
||||||
- Andreas Rozek
|
- Andreas Rozek
|
||||||
- Andre LaBranche
|
- Andre LaBranche
|
||||||
- Andy Bearman
|
- Andy Bearman
|
||||||
- Andy Luhrs
|
- Andy Luhrs
|
||||||
- Andy Pilate
|
- Andy Pilate
|
||||||
|
- Anonymous
|
||||||
|
- Anthony Monthe
|
||||||
- Any-Winter-4079
|
- Any-Winter-4079
|
||||||
- apolinario
|
- apolinario
|
||||||
|
- Ar7ific1al
|
||||||
- ArDiouscuros
|
- ArDiouscuros
|
||||||
- Armando C. Santisbon
|
- Armando C. Santisbon
|
||||||
|
- Arnold Cordewiner
|
||||||
- Arthur Holstvoogd
|
- Arthur Holstvoogd
|
||||||
- artmen1516
|
- artmen1516
|
||||||
- Artur
|
- Artur
|
||||||
@ -64,13 +87,16 @@ We thank them for all of their time and hard work.
|
|||||||
- blhook
|
- blhook
|
||||||
- BlueAmulet
|
- BlueAmulet
|
||||||
- Bouncyknighter
|
- Bouncyknighter
|
||||||
|
- Brandon
|
||||||
- Brandon Rising
|
- Brandon Rising
|
||||||
- Brent Ozar
|
- Brent Ozar
|
||||||
- Brian Racer
|
- Brian Racer
|
||||||
- bsilvereagle
|
- bsilvereagle
|
||||||
- c67e708d
|
- c67e708d
|
||||||
|
- camenduru
|
||||||
- CapableWeb
|
- CapableWeb
|
||||||
- Carson Katri
|
- Carson Katri
|
||||||
|
- chainchompa
|
||||||
- Chloe
|
- Chloe
|
||||||
- Chris Dawson
|
- Chris Dawson
|
||||||
- Chris Hayes
|
- Chris Hayes
|
||||||
@@ -86,30 +112,45 @@ We thank them for all of their time and hard work.
- cpacker
- Cragin Godley
- creachec
- CrypticWit
- d8ahazard
- damian
- damian0815
- Damian at mba
- Damian Stewart
- Daniel Manzke
- Danny Beer
- Dan Sully
- Darren Ringer
- David Burnett
- David Ford
- David Regla
- David Sisco
- David Wager
- Daya Adianto
- db3000
- DekitaRPG
- Denis Olshin
- Dennis
- dependabot[bot]
- Dmitry Parnas
- Dobrynia100
- Dominic Letz
- DrGunnarMallon
- Drun555
- dunkeroni
- Edward Johan
- elliotsayes
- Elrik
- ElrikUnderlake
- Eric Khun
- Eric Wolf
- Eugene
- Eugene Brodsky
- ExperimentalCyborg
- Fabian Bahl
- Fabio 'MrWHO' Torchetti
- Fattire
- fattire
- Felipe Nogueira
- Félix Sanz
@@ -118,8 +159,12 @@ We thank them for all of their time and hard work.
- gabrielrotbart
- gallegonovato
- Gérald LONLAS
- Gille
- GitHub Actions Bot
- glibesyck
- gogurtenjoyer
- Gohsuke Shimada
- greatwolf
- greentext2
- Gregg Helt
- H4rk
@@ -131,6 +176,7 @@ We thank them for all of their time and hard work.
- Hosted Weblate
- Iman Karim
- ismail ihsan bülbül
- ItzAttila
- Ivan Efimov
- jakehl
- Jakub Kolčář
@@ -141,6 +187,7 @@ We thank them for all of their time and hard work.
- Jason Toffaletti
- Jaulustus
- Jeff Mahoney
- Jennifer Player
- jeremy
- Jeremy Clark
- JigenD
@@ -148,19 +195,26 @@ We thank them for all of their time and hard work.
- Johan Roxendal
- Johnathon Selstad
- Jonathan
- Jordan Hewitt
- Joseph Dries III
- Josh Corbett
- JPPhoto
- jspraul
- junzi
- Justin Wong
- Juuso V
- Kaspar Emanuel
- Katsuyuki-Karasawa
- Keerigan45
- Kent Keirsey
- Kevin Brack
- Kevin Coakley
- Kevin Gibbons
- Kevin Schaul
- Kevin Turner
- Kieran Klaassen
- krummrey
- Kyle
- Kyle Lacy
- Kyle Schouviller
- Lawrence Norton
@@ -171,10 +225,15 @@ We thank them for all of their time and hard work.
- Lynne Whitehorn
- majick
- Marco Labarile
- Marta Nahorniuk
- Martin Kristiansen
- Mary Hipp
- maryhipp
- Mary Hipp Rogers
- mastercaster
- mastercaster9000
- Matthias Wild
- mauwii
- michaelk71
- mickr777
- Mihai
@@ -182,11 +241,15 @@ We thank them for all of their time and hard work.
- Mikhail Tishin
- Millun Atluri
- Minjune Song
- Mitchell Allain
- mitien
- mofuzz
- Muhammad Usama
- Name
- _nderscore
- Neil Wang
- nekowaiz
- nemuruibai
- Netzer R
- Nicholas Koh
- Nicholas Körfer
@@ -197,9 +260,11 @@ We thank them for all of their time and hard work.
- ofirkris
- Olivier Louvignes
- owenvincent
- pand4z31
- Patrick Esser
- Patrick Tien
- Patrick von Platen
- Paul Curry
- Paul Sajna
- pejotr
- Peter Baylies
@@ -207,6 +272,7 @@ We thank them for all of their time and hard work.
- plucked
- prixt
- psychedelicious
- psychedelicious@windows
- Rainer Bernhardt
- Riccardo Giovanetti
- Rich Jones
@@ -215,16 +281,22 @@ We thank them for all of their time and hard work.
- Robert Bolender
- Robin Rombach
- Rohan Barar
- Rohinish
- rpagliuca
- rromb
- Rupesh Sreeraman
- Ryan
- Ryan Cao
- Ryan Dick
- Saifeddine
- Saifeddine ALOUI
- Sam
- SammCheese
- Sam McLeod
- Sammy
- sammyf
- Samuel Husso
- Saurav Maheshkar
- Scott Lahteine
- Sean McLellan
- Sebastian Aigner
@@ -232,16 +304,21 @@ We thank them for all of their time and hard work.
- Sergey Krashevich
- Shapor Naghibzadeh
- Shawn Zhong
- Simona Liliac
- Simon Vans-Colina
- skunkworxdark
- slashtechno
- SoheilRezaei
- Song, Pengcheng
- spezialspezial
- ssantos
- StAlKeR7779
- Stefan Tobler
- Stephan Koglin-Fischer
- SteveCaruso
- Steve Martinelli
- Steven Frank
- Surisen
- System X - Files
- Taylor Kems
- techicode
@@ -260,26 +337,34 @@ We thank them for all of their time and hard work.
- tyler
- unknown
- user1
- vedant-3010
- Vedant Madane
- veprogames
- wa.code
- wfng92
- whjms
- whosawhatsis
- Will
- William Becher
- William Chong
- Wilson E. Alvarez
- woweenie
- Wubbbi
- xra
- Yeung Yiu Hung
- ymgenesis
- Yorzaren
- Yosuke Shinya
- yun saki
- ZachNagengast
- Zadagu
- zeptofine
- Zerdoumi
- Васянатор
- 冯不游
- 唐澤 克幸

## **Original CompVis (Stable Diffusion) Authors**

- [Robin Rombach](https://github.com/rromb)
- [Patrick von Platen](https://github.com/patrickvonplaten)

docs/stylesheets/extra.css (Normal file, 5 lines added)
@@ -0,0 +1,5 @@
:root {
  --md-primary-fg-color: #35A4DB;
  --md-primary-fg-color--light: #35A4DB;
  --md-primary-fg-color--dark: #35A4DB;
}

docs/workflows/ESRGAN_img2img_upscale_w_Canny_ControlNet.json (Normal file, 1364 lines added)
docs/workflows/Face_Detailer_with_IP-Adapter_and_Canny.json (Normal file, 2930 lines added)
docs/workflows/Multi_ControlNet_Canny_and_Depth.json (Normal file, 1480 lines added)
docs/workflows/Prompt_from_File.json (Normal file, 975 lines added)
@@ -0,0 +1,975 @@
{
  "name": "Prompt from File",
  "author": "InvokeAI",
  "description": "Sample workflow using Prompt from File node",
  "version": "0.1.0",
  "contact": "invoke@invoke.ai",
  "tags": "text2image, prompt from file, default",
  "notes": "",
  "exposedFields": [
    { "nodeId": "d6353b7f-b447-4e17-8f2e-80a88c91d426", "fieldName": "model" },
    { "nodeId": "1b7e0df8-8589-4915-a4ea-c0088f15d642", "fieldName": "file_path" }
  ],
  "meta": { "category": "default", "version": "2.0.0" },
  "id": "d1609af5-eb0a-4f73-b573-c9af96a8d6bf",
  "nodes": [
    {
      "id": "c2eaf1ba-5708-4679-9e15-945b8b432692",
      "type": "invocation",
      "data": {
        "id": "c2eaf1ba-5708-4679-9e15-945b8b432692",
        "type": "compel",
        "label": "",
        "isOpen": false,
        "notes": "",
        "isIntermediate": true,
        "useCache": true,
        "version": "1.0.0",
        "nodePack": "invokeai",
        "inputs": {
          "prompt": { "id": "dcdf3f6d-9b96-4bcd-9b8d-f992fefe4f62", "name": "prompt", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "StringField" }, "value": "" },
          "clip": { "id": "3f1981c9-d8a9-42eb-a739-4f120eb80745", "name": "clip", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "ClipField" } }
        },
        "outputs": {
          "conditioning": { "id": "46205e6c-c5e2-44cb-9c82-1cd20b95674a", "name": "conditioning", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "ConditioningField" } }
        }
      },
      "width": 320,
      "height": 32,
      "position": { "x": 925, "y": -200 }
    },
    {
      "id": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
      "type": "invocation",
      "data": {
        "id": "1b7e0df8-8589-4915-a4ea-c0088f15d642",
        "type": "prompt_from_file",
        "label": "Prompts from File",
        "isOpen": true,
        "notes": "",
        "isIntermediate": true,
        "useCache": true,
        "version": "1.0.1",
        "nodePack": "invokeai",
        "inputs": {
          "file_path": { "id": "37e37684-4f30-4ec8-beae-b333e550f904", "name": "file_path", "fieldKind": "input", "label": "Prompts File Path", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "StringField" }, "value": "" },
          "pre_prompt": { "id": "7de02feb-819a-4992-bad3-72a30920ddea", "name": "pre_prompt", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "StringField" }, "value": "" },
          "post_prompt": { "id": "95f191d8-a282-428e-bd65-de8cb9b7513a", "name": "post_prompt", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "StringField" }, "value": "" },
          "start_line": { "id": "efee9a48-05ab-4829-8429-becfa64a0782", "name": "start_line", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "IntegerField" }, "value": 1 },
          "max_prompts": { "id": "abebb428-3d3d-49fd-a482-4e96a16fff08", "name": "max_prompts", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "IntegerField" }, "value": 1 }
        },
        "outputs": {
          "collection": { "id": "77d5d7f1-9877-4ab1-9a8c-33e9ffa9abf3", "name": "collection", "fieldKind": "output", "type": { "isCollection": true, "isCollectionOrScalar": false, "name": "StringField" } }
        }
      },
      "width": 320,
      "height": 580,
      "position": { "x": 475, "y": -400 }
    },
    {
      "id": "1b89067c-3f6b-42c8-991f-e3055789b251",
      "type": "invocation",
      "data": {
        "id": "1b89067c-3f6b-42c8-991f-e3055789b251",
        "type": "iterate",
        "label": "",
        "isOpen": false,
        "notes": "",
        "isIntermediate": true,
        "useCache": true,
        "version": "1.1.0",
        "inputs": {
          "collection": { "id": "4c564bf8-5ed6-441e-ad2c-dda265d5785f", "name": "collection", "fieldKind": "input", "label": "", "type": { "isCollection": true, "isCollectionOrScalar": false, "name": "CollectionField" } }
        },
        "outputs": {
          "item": { "id": "36340f9a-e7a5-4afa-b4b5-313f4e292380", "name": "item", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "CollectionItemField" } },
          "index": { "id": "1beca95a-2159-460f-97ff-c8bab7d89336", "name": "index", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "IntegerField" } },
          "total": { "id": "ead597b8-108e-4eda-88a8-5c29fa2f8df9", "name": "total", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "IntegerField" } }
        }
      },
      "width": 320,
      "height": 32,
      "position": { "x": 925, "y": -400 }
    },
    {
      "id": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
      "type": "invocation",
      "data": {
        "id": "d6353b7f-b447-4e17-8f2e-80a88c91d426",
        "type": "main_model_loader",
        "label": "",
        "isOpen": true,
        "notes": "",
        "isIntermediate": true,
        "useCache": true,
        "version": "1.0.0",
        "nodePack": "invokeai",
        "inputs": {
          "model": { "id": "3f264259-3418-47d5-b90d-b6600e36ae46", "name": "model", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "MainModelField" }, "value": { "model_name": "stable-diffusion-v1-5", "base_model": "sd-1", "model_type": "main" } }
        },
        "outputs": {
          "unet": { "id": "8e182ea2-9d0a-4c02-9407-27819288d4b5", "name": "unet", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "UNetField" } },
          "clip": { "id": "d67d9d30-058c-46d5-bded-3d09d6d1aa39", "name": "clip", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "ClipField" } },
          "vae": { "id": "89641601-0429-4448-98d5-190822d920d8", "name": "vae", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "VaeField" } }
        }
      },
      "width": 320,
      "height": 227,
      "position": { "x": 0, "y": -375 }
    },
    {
      "id": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
      "type": "invocation",
      "data": {
        "id": "fc9d0e35-a6de-4a19-84e1-c72497c823f6",
        "type": "compel",
        "label": "",
        "isOpen": false,
        "notes": "",
        "isIntermediate": true,
        "useCache": true,
        "version": "1.0.0",
        "nodePack": "invokeai",
        "inputs": {
          "prompt": { "id": "dcdf3f6d-9b96-4bcd-9b8d-f992fefe4f62", "name": "prompt", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "StringField" }, "value": "" },
          "clip": { "id": "3f1981c9-d8a9-42eb-a739-4f120eb80745", "name": "clip", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "ClipField" } }
        },
        "outputs": {
          "conditioning": { "id": "46205e6c-c5e2-44cb-9c82-1cd20b95674a", "name": "conditioning", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "ConditioningField" } }
        }
      },
      "width": 320,
      "height": 32,
      "position": { "x": 925, "y": -275 }
    },
    {
      "id": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
      "type": "invocation",
      "data": {
        "id": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77",
        "type": "noise",
        "label": "",
        "isOpen": false,
        "notes": "",
        "isIntermediate": true,
        "useCache": true,
        "version": "1.0.1",
        "nodePack": "invokeai",
        "inputs": {
          "seed": { "id": "b722d84a-eeee-484f-bef2-0250c027cb67", "name": "seed", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "IntegerField" }, "value": 0 },
          "width": { "id": "d5f8ce11-0502-4bfc-9a30-5757dddf1f94", "name": "width", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "IntegerField" }, "value": 512 },
          "height": { "id": "f187d5ff-38a5-4c3f-b780-fc5801ef34af", "name": "height", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "IntegerField" }, "value": 512 },
          "use_cpu": { "id": "12f112b8-8b76-4816-b79e-662edc9f9aa5", "name": "use_cpu", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "BooleanField" }, "value": true }
        },
        "outputs": {
          "noise": { "id": "08576ad1-96d9-42d2-96ef-6f5c1961933f", "name": "noise", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "LatentsField" } },
          "width": { "id": "f3e1f94a-258d-41ff-9789-bd999bd9f40d", "name": "width", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "IntegerField" } },
          "height": { "id": "6cefc357-4339-415e-a951-49b9c2be32f4", "name": "height", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "IntegerField" } }
        }
      },
      "width": 320,
      "height": 32,
      "position": { "x": 925, "y": 25 }
    },
    {
      "id": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
      "type": "invocation",
      "data": {
        "id": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5",
        "type": "rand_int",
        "label": "",
        "isOpen": false,
        "notes": "",
        "isIntermediate": true,
        "useCache": false,
        "version": "1.0.0",
        "nodePack": "invokeai",
        "inputs": {
          "low": { "id": "b9fc6cf1-469c-4037-9bf0-04836965826f", "name": "low", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "IntegerField" }, "value": 0 },
          "high": { "id": "06eac725-0f60-4ba2-b8cd-7ad9f757488c", "name": "high", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "IntegerField" }, "value": 2147483647 }
        },
        "outputs": {
          "value": { "id": "df08c84e-7346-4e92-9042-9e5cb773aaff", "name": "value", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "IntegerField" } }
        }
      },
      "width": 320,
      "height": 32,
      "position": { "x": 925, "y": -50 }
    },
    {
      "id": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
      "type": "invocation",
      "data": {
        "id": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1",
        "type": "l2i",
        "label": "",
        "isOpen": true,
        "notes": "",
        "isIntermediate": true,
        "useCache": true,
        "version": "1.2.0",
        "nodePack": "invokeai",
        "inputs": {
          "metadata": { "id": "022e4b33-562b-438d-b7df-41c3fd931f40", "name": "metadata", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "MetadataField" } },
          "latents": { "id": "67cb6c77-a394-4a66-a6a9-a0a7dcca69ec", "name": "latents", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "LatentsField" } },
          "vae": { "id": "7b3fd9ad-a4ef-4e04-89fa-3832a9902dbd", "name": "vae", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "VaeField" } },
          "tiled": { "id": "5ac5680d-3add-4115-8ec0-9ef5bb87493b", "name": "tiled", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "BooleanField" }, "value": false },
          "fp32": { "id": "db8297f5-55f8-452f-98cf-6572c2582152", "name": "fp32", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "BooleanField" }, "value": false }
        },
        "outputs": {
          "image": { "id": "d8778d0c-592a-4960-9280-4e77e00a7f33", "name": "image", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "ImageField" } },
          "width": { "id": "c8b0a75a-f5de-4ff2-9227-f25bb2b97bec", "name": "width", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "IntegerField" } },
          "height": { "id": "83c05fbf-76b9-49ab-93c4-fa4b10e793e4", "name": "height", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "IntegerField" } }
        }
      },
      "width": 320,
      "height": 267,
      "position": { "x": 2037.861329274915, "y": -329.8393457509562 }
    },
    {
      "id": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
      "type": "invocation",
      "data": {
        "id": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e",
        "type": "denoise_latents",
        "label": "",
        "isOpen": true,
        "notes": "",
        "isIntermediate": true,
        "useCache": true,
        "version": "1.5.0",
        "nodePack": "invokeai",
        "inputs": {
          "positive_conditioning": { "id": "751fb35b-3f23-45ce-af1c-053e74251337", "name": "positive_conditioning", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "ConditioningField" } },
          "negative_conditioning": { "id": "b9dc06b6-7481-4db1-a8c2-39d22a5eacff", "name": "negative_conditioning", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "ConditioningField" } },
          "noise": { "id": "6e15e439-3390-48a4-8031-01e0e19f0e1d", "name": "noise", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "LatentsField" } },
          "steps": { "id": "bfdfb3df-760b-4d51-b17b-0abb38b976c2", "name": "steps", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "IntegerField" }, "value": 10 },
          "cfg_scale": { "id": "47770858-322e-41af-8494-d8b63ed735f3", "name": "cfg_scale", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": true, "name": "FloatField" }, "value": 7.5 },
          "denoising_start": { "id": "2ba78720-ee02-4130-a348-7bc3531f790b", "name": "denoising_start", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "FloatField" }, "value": 0 },
          "denoising_end": { "id": "a874dffb-d433-4d1a-9f59-af4367bb05e4", "name": "denoising_end", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "FloatField" }, "value": 1 },
          "scheduler": { "id": "36e021ad-b762-4fe4-ad4d-17f0291c40b2", "name": "scheduler", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "SchedulerField" }, "value": "euler" },
          "unet": { "id": "98d3282d-f9f6-4b5e-b9e8-58658f1cac78", "name": "unet", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "UNetField" } },
          "control": { "id": "f2ea3216-43d5-42b4-887f-36e8f7166d53", "name": "control", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": true, "name": "ControlField" } },
          "ip_adapter": { "id": "d0780610-a298-47c8-a54e-70e769e0dfe2", "name": "ip_adapter", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": true, "name": "IPAdapterField" } },
          "t2i_adapter": { "id": "fdb40970-185e-4ea8-8bb5-88f06f91f46a", "name": "t2i_adapter", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": true, "name": "T2IAdapterField" } },
          "cfg_rescale_multiplier": { "id": "3af2d8c5-de83-425c-a100-49cb0f1f4385", "name": "cfg_rescale_multiplier", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "FloatField" }, "value": 0 },
          "latents": { "id": "e05b538a-1b5a-4aa5-84b1-fd2361289a81", "name": "latents", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "LatentsField" } },
          "denoise_mask": { "id": "463a419e-df30-4382-8ffb-b25b25abe425", "name": "denoise_mask", "fieldKind": "input", "label": "", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "DenoiseMaskField" } }
        },
        "outputs": {
          "latents": { "id": "559ee688-66cf-4139-8b82-3d3aa69995ce", "name": "latents", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "LatentsField" } },
          "width": { "id": "0b4285c2-e8b9-48e5-98f6-0a49d3f98fd2", "name": "width", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "IntegerField" } },
          "height": { "id": "8b0881b9-45e5-47d5-b526-24b6661de0ee", "name": "height", "fieldKind": "output", "type": { "isCollection": false, "isCollectionOrScalar": false, "name": "IntegerField" } }
        }
      },
      "width": 320,
      "height": 705,
      "position": { "x": 1570.9941088179146, "y": -407.6505491604564 }
    }
  ],
  "edges": [
    { "id": "1b89067c-3f6b-42c8-991f-e3055789b251-fc9d0e35-a6de-4a19-84e1-c72497c823f6-collapsed", "source": "1b89067c-3f6b-42c8-991f-e3055789b251", "target": "fc9d0e35-a6de-4a19-84e1-c72497c823f6", "type": "collapsed" },
    { "id": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5-0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77-collapsed", "source": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5", "target": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77", "type": "collapsed" },
    { "id": "reactflow__edge-1b7e0df8-8589-4915-a4ea-c0088f15d642collection-1b89067c-3f6b-42c8-991f-e3055789b251collection", "source": "1b7e0df8-8589-4915-a4ea-c0088f15d642", "target": "1b89067c-3f6b-42c8-991f-e3055789b251", "type": "default", "sourceHandle": "collection", "targetHandle": "collection" },
    { "id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426clip-fc9d0e35-a6de-4a19-84e1-c72497c823f6clip", "source": "d6353b7f-b447-4e17-8f2e-80a88c91d426", "target": "fc9d0e35-a6de-4a19-84e1-c72497c823f6", "type": "default", "sourceHandle": "clip", "targetHandle": "clip" },
    { "id": "reactflow__edge-1b89067c-3f6b-42c8-991f-e3055789b251item-fc9d0e35-a6de-4a19-84e1-c72497c823f6prompt", "source": "1b89067c-3f6b-42c8-991f-e3055789b251", "target": "fc9d0e35-a6de-4a19-84e1-c72497c823f6", "type": "default", "sourceHandle": "item", "targetHandle": "prompt" },
    { "id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426clip-c2eaf1ba-5708-4679-9e15-945b8b432692clip", "source": "d6353b7f-b447-4e17-8f2e-80a88c91d426", "target": "c2eaf1ba-5708-4679-9e15-945b8b432692", "type": "default", "sourceHandle": "clip", "targetHandle": "clip" },
    { "id": "reactflow__edge-dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5value-0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77seed", "source": "dfc20e07-7aef-4fc0-a3a1-7bf68ec6a4e5", "target": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77", "type": "default", "sourceHandle": "value", "targetHandle": "seed" },
    { "id": "reactflow__edge-fc9d0e35-a6de-4a19-84e1-c72497c823f6conditioning-2fb1577f-0a56-4f12-8711-8afcaaaf1d5epositive_conditioning", "source": "fc9d0e35-a6de-4a19-84e1-c72497c823f6", "target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e", "type": "default", "sourceHandle": "conditioning", "targetHandle": "positive_conditioning" },
    { "id": "reactflow__edge-c2eaf1ba-5708-4679-9e15-945b8b432692conditioning-2fb1577f-0a56-4f12-8711-8afcaaaf1d5enegative_conditioning", "source": "c2eaf1ba-5708-4679-9e15-945b8b432692", "target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e", "type": "default", "sourceHandle": "conditioning", "targetHandle": "negative_conditioning" },
    { "id": "reactflow__edge-0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77noise-2fb1577f-0a56-4f12-8711-8afcaaaf1d5enoise", "source": "0eb5f3f5-1b91-49eb-9ef0-41d67c7eae77", "target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e", "type": "default", "sourceHandle": "noise", "targetHandle": "noise" },
    { "id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426unet-2fb1577f-0a56-4f12-8711-8afcaaaf1d5eunet", "source": "d6353b7f-b447-4e17-8f2e-80a88c91d426", "target": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e", "type": "default", "sourceHandle": "unet", "targetHandle": "unet" },
    { "id": "reactflow__edge-2fb1577f-0a56-4f12-8711-8afcaaaf1d5elatents-491ec988-3c77-4c37-af8a-39a0c4e7a2a1latents", "source": "2fb1577f-0a56-4f12-8711-8afcaaaf1d5e", "target": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1", "type": "default", "sourceHandle": "latents", "targetHandle": "latents" },
    { "id": "reactflow__edge-d6353b7f-b447-4e17-8f2e-80a88c91d426vae-491ec988-3c77-4c37-af8a-39a0c4e7a2a1vae", "source": "d6353b7f-b447-4e17-8f2e-80a88c91d426", "target": "491ec988-3c77-4c37-af8a-39a0c4e7a2a1", "type": "default", "sourceHandle": "vae", "targetHandle": "vae" }
  ]
}
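
The workflow above is a plain node graph: `nodes` holds the invocation definitions and `edges` wires output handles to input handles by node id. A minimal inspection sketch (stdlib only, assuming the JSON has been saved locally as `Prompt_from_File.json`):

```python
import json

# Load the workflow and print each "default" edge as
# source_node_type.handle -> target_node_type.handle.
with open("Prompt_from_File.json") as f:
    wf = json.load(f)

# Map node id -> invocation type (e.g. "compel", "denoise_latents").
node_types = {n["id"]: n["data"]["type"] for n in wf["nodes"]}

for edge in wf["edges"]:
    if edge["type"] != "default":
        continue  # "collapsed" edges are UI state only and carry no handles
    src = f"{node_types[edge['source']]}.{edge['sourceHandle']}"
    dst = f"{node_types[edge['target']]}.{edge['targetHandle']}"
    print(f"{src} -> {dst}")
```

For this graph it prints, among others, `prompt_from_file.collection -> iterate.collection` and `iterate.item -> compel.prompt`, which is how each line of the prompts file becomes one generation.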
docs/workflows/QR_Code_Monster.json (Normal file, 758 lines added)
@@ -0,0 +1,758 @@
{
  "name": "QR Code Monster",
  "author": "InvokeAI",
  "description": "Sample workflow for create images with QR code Monster ControlNet",
  "version": "1.0.1",
  "contact": "invoke@invoke.ai",
  "tags": "qrcode, controlnet, default",
  "notes": "",
  "exposedFields": [
    { "nodeId": "a6cc0986-f928-4a7e-8d44-ba2d4b36f54a", "fieldName": "image" },
    { "nodeId": "aca3b054-bfba-4392-bd20-6476f59504df", "fieldName": "prompt" },
    { "nodeId": "3db7cee0-31e2-4a3d-94a1-268cb16177dd", "fieldName": "prompt" }
  ],
  "meta": { "version": "1.0.0" },
  "nodes": [
    {
      "id": "3db7cee0-31e2-4a3d-94a1-268cb16177dd",
      "type": "invocation",
      "data": {
        "id": "3db7cee0-31e2-4a3d-94a1-268cb16177dd",
        "type": "compel",
        "inputs": {
          "prompt": { "id": "6a1fe244-5656-4f8c-91d1-1fb474e28807", "name": "prompt", "type": "string", "fieldKind": "input", "label": "Negative Prompt", "value": "" },
          "clip": { "id": "f24688f3-29b8-4a2d-8603-046e5a5c7250", "name": "clip", "type": "ClipField", "fieldKind": "input", "label": "" }
        },
        "outputs": {
          "conditioning": { "id": "700528eb-3f8b-4745-b540-34f919b5b228", "name": "conditioning", "type": "ConditioningField", "fieldKind": "output" }
        },
        "label": "Prompt",
        "isOpen": true,
        "notes": "",
        "embedWorkflow": false,
        "isIntermediate": true,
        "useCache": true,
        "version": "1.0.0"
      },
      "width": 320,
      "height": 261,
      "position": { "x": 773.0502679628016, "y": 1622.4836086770556 }
    },
    {
      "id": "610384f1-6f0c-4847-a9a2-37ce7f456ed1",
      "type": "invocation",
      "data": {
        "id": "610384f1-6f0c-4847-a9a2-37ce7f456ed1",
        "type": "main_model_loader",
        "inputs": {
          "model": { "id": "cb36b6d3-6c1f-4911-a200-646745b0ff74", "name": "model", "type": "MainModelField", "fieldKind": "input", "label": "", "value": { "model_name": "stable-diffusion-v1-5", "base_model": "sd-1", "model_type": "main" } }
        },
        "outputs": {
          "unet": { "id": "7246895b-b252-49bc-b952-8d801b4672f7", "name": "unet", "type": "UNetField", "fieldKind": "output" },
          "clip": { "id": "3c2aedb8-30d5-4d4b-99df-d06a0d7bedc6", "name": "clip", "type": "ClipField", "fieldKind": "output" },
          "vae": { "id": "b9743815-5501-4bbb-8bde-8bd6ba298a4e", "name": "vae", "type": "VaeField", "fieldKind": "output" }
        },
        "label": "",
        "isOpen": true,
        "notes": "",
        "embedWorkflow": false,
        "isIntermediate": true,
        "useCache": true,
        "version": "1.0.0"
      },
      "width": 320,
      "height": 226,
      "position": { "x": 211.58866462619744, "y": 1376.0542388105248 }
    },
    {
      "id": "aca3b054-bfba-4392-bd20-6476f59504df",
      "type": "invocation",
      "data": {
        "id": "aca3b054-bfba-4392-bd20-6476f59504df",
        "type": "compel",
        "inputs": {
          "prompt": { "id": "6a1fe244-5656-4f8c-91d1-1fb474e28807", "name": "prompt", "type": "string", "fieldKind": "input", "label": "Positive Prompt", "value": "" },
          "clip": { "id": "f24688f3-29b8-4a2d-8603-046e5a5c7250", "name": "clip", "type": "ClipField", "fieldKind": "input", "label": "" }
        },
        "outputs": {
          "conditioning": { "id": "700528eb-3f8b-4745-b540-34f919b5b228", "name": "conditioning", "type": "ConditioningField", "fieldKind": "output" }
        },
        "label": "",
        "isOpen": true,
        "notes": "",
        "embedWorkflow": false,
        "isIntermediate": true,
        "useCache": true,
        "version": "1.0.0"
      },
      "width": 320,
      "height": 261,
      "position": { "x": 770.6491131680111, "y": 1316.379247112241 }
    },
    {
      "id": "a6cc0986-f928-4a7e-8d44-ba2d4b36f54a",
      "type": "invocation",
      "data": {
        "id": "a6cc0986-f928-4a7e-8d44-ba2d4b36f54a",
        "type": "image",
        "inputs": {
          "image": { "id": "89ba5d58-28c9-4e04-a5df-79fb7a6f3531", "name": "image", "type": "ImageField", "fieldKind": "input", "label": "QR Code / Hidden Image" }
        },
        "outputs": {
          "image": { "id": "54335653-0e17-42da-b9e8-83c5fb5af670", "name": "image", "type": "ImageField", "fieldKind": "output" },
          "width": { "id": "a3c65953-39ea-4d97-8858-d65154ff9d11", "name": "width", "type": "integer", "fieldKind": "output" },
          "height": { "id": "2c7db511-ebc9-4286-a46b-bc11e0fd779f", "name": "height", "type": "integer", "fieldKind": "output" }
        },
        "label": "",
        "isOpen": true,
        "notes": "",
        "embedWorkflow": false,
        "isIntermediate": true,
        "useCache": true,
        "version": "1.0.0"
      },
      "width": 320,
      "height": 225,
      "position": { "x": 700.5034176864369, "y": 1981.749600549388 }
    },
    {
      "id": "280fd8a7-3b0c-49fe-8be4-6246e08b6c9a",
      "type": "invocation",
      "data": {
        "id": "280fd8a7-3b0c-49fe-8be4-6246e08b6c9a",
        "type": "noise",
        "inputs": {
          "seed": { "id": "7c6c76dd-127b-4829-b1ec-430790cb7ed7", "name": "seed", "type": "integer", "fieldKind": "input", "label": "", "value": 0 },
          "width": { "id": "8ec6a525-a421-40d8-a17e-39e7b6836438", "name": "width", "type": "integer", "fieldKind": "input", "label": "", "value": 512 },
          "height": { "id": "6af1e58a-e2ee-4ec4-9f06-d8d0412922ca", "name": "height", "type": "integer", "fieldKind": "input", "label": "", "value": 512 },
          "use_cpu": { "id": "26662e99-5720-43a6-a5d8-06c9dab0e261", "name": "use_cpu", "type": "boolean", "fieldKind": "input", "label": "", "value": true }
        },
        "outputs": {
          "noise": { "id": "cb4c4dfc-a744-49eb-af4f-677448e28407", "name": "noise", "type": "LatentsField", "fieldKind": "output" },
          "width": { "id": "97e87be6-e81f-40a3-a522-28ebe4aad0ac", "name": "width", "type": "integer", "fieldKind": "output" },
          "height": { "id": "80784420-f1e1-47b0-bd1d-1d381a15e22d", "name": "height", "type": "integer", "fieldKind": "output" }
        },
        "label": "",
        "isOpen": false,
        "notes": "",
        "embedWorkflow": false,
        "isIntermediate": true,
        "useCache": true,
        "version": "1.0.0"
      },
      "width": 320,
      "height": 32,
      "position": { "x": 1182.460291960481, "y": 1759.592972960265 }
    },
    {
      "id": "2ac03cf6-0326-454a-bed0-d8baef2bf30d",
      "type": "invocation",
      "data": {
        "id": "2ac03cf6-0326-454a-bed0-d8baef2bf30d",
        "type": "controlnet",
        "inputs": {
          "image": { "id": "1f683889-9f14-40c8-af29-4b991b211a3a", "name": "image", "type": "ImageField", "fieldKind": "input", "label": "" },
          "control_model": { "id": "a933b21d-22c1-4e06-818f-15416b971282", "name": "control_model", "type": "ControlNetModelField", "fieldKind": "input", "label": "", "value": { "model_name": "qrcode_monster", "base_model": "sd-1" } },
          "control_weight": { "id": "198a0825-e55e-4496-bc54-c3d7b02f3d75", "name": "control_weight", "type": "FloatPolymorphic", "fieldKind": "input", "label": "", "value": 1.4 },
          "begin_step_percent": { "id": "c85ce42f-22af-42a0-8993-676002fb275e", "name": "begin_step_percent", "type": "float", "fieldKind": "input", "label": "", "value": 0 },
          "end_step_percent": { "id": "a61a65c4-9e6f-4fe2-96a5-1294d17ec6e4", "name": "end_step_percent", "type": "float", "fieldKind": "input", "label": "", "value": 1 },
          "control_mode": { "id": "1aa45cfa-0249-46b7-bf24-3e38e92f5fa0", "name": "control_mode", "type": "enum", "fieldKind": "input", "label": "", "value": "balanced" },
          "resize_mode": { "id": "a89d3cb9-a141-4cea-bb49-977bf267377b", "name": "resize_mode", "type": "enum", "fieldKind": "input", "label": "", "value": "just_resize" }
        },
        "outputs": {
          "control": { "id": "c9a1fc7e-cb25-45a9-adff-1a97c9ff04d6", "name": "control", "type": "ControlField", "fieldKind": "output" }
        },
        "label": "",
        "isOpen": true,
        "notes": "",
        "embedWorkflow": false,
        "isIntermediate": true,
        "useCache": true,
        "version": "1.0.0"
      },
      "width": 320,
      "height": 508,
      "position": { "x": 1165.434407461108, "y": 1862.916856351665 }
    },
    {
      "id": "28542b66-5a00-4780-a318-0a036d2df914",
      "type": "invocation",
      "data": {
        "id": "28542b66-5a00-4780-a318-0a036d2df914",
        "type": "l2i",
        "inputs": {
          "metadata": { "id": "a38e8f55-7f2c-4fcc-a71f-d51e2eb0374a", "name": "metadata", "type": "MetadataField", "fieldKind": "input", "label": "" },
          "latents": { "id": "80e97bc8-e716-4175-9115-5b58495aa30c", "name": "latents", "type": "LatentsField", "fieldKind": "input", "label": "" },
          "vae": { "id": "5641bce6-ac2b-47eb-bb32-2f290026b7e1", "name": "vae", "type": "VaeField", "fieldKind": "input", "label": "" },
          "tiled": { "id": "9e75eb16-ae48-47ed-b180-e0409d377436", "name": "tiled", "type": "boolean", "fieldKind": "input", "label": "", "value": false },
          "fp32": { "id": "0518b0ce-ee37-437b-8437-cc2976a3279f", "name": "fp32", "type": "boolean", "fieldKind": "input", "label": "", "value": false }
        },
        "outputs": {
          "image": { "id": "ec2ff985-a7eb-401f-92c4-1217cddad6a2", "name": "image", "type": "ImageField", "fieldKind": "output" },
          "width": { "id": "ba1d1720-6d67-4eca-9e9d-b97d08636774", "name": "width", "type": "integer", "fieldKind": "output" },
          "height": { "id": "10bcf8f4-6394-422f-b0c0-51680f3bfb25", "name": "height", "type": "integer", "fieldKind": "output" }
        },
        "label": "",
        "isOpen": true,
        "notes": "",
        "embedWorkflow": false,
        "isIntermediate": true,
        "useCache": true,
        "version": "1.0.0"
      },
      "width": 320,
      "height": 267,
      "position": { "x": 2110.8415693683014, "y": 1487.253341116115 }
    },
    {
      "id": "9755ae4c-ef30-4db3-80f6-a31f98979a11",
      "type": "invocation",
      "data": {
        "id": "9755ae4c-ef30-4db3-80f6-a31f98979a11",
        "type": "denoise_latents",
        "inputs": {
          "positive_conditioning": { "id": "8e6aceaa-a986-4ab2-9c04-5b1027b3daf6", "name": "positive_conditioning", "type": "ConditioningField", "fieldKind": "input", "label": "" },
          "negative_conditioning": { "id": "fbbaa712-ca1a-420b-9016-763f2a29d68c", "name": "negative_conditioning", "type": "ConditioningField", "fieldKind": "input", "label": "" },
          "noise": { "id": "a3b3d5d2-c0f9-4b89-a9b3-8de9418f7bb5", "name": "noise", "type": "LatentsField", "fieldKind": "input", "label": "" },
          "steps": { "id": "e491e664-2f8c-4f49-b3e4-57b051fbb9c5", "name": "steps", "type": "integer", "fieldKind": "input", "label": "", "value": 10 },
          "cfg_scale": { "id": "f0318abd-ed65-4cad-86a7-48d1c19a6d14", "name": "cfg_scale", "type": "FloatPolymorphic", "fieldKind": "input", "label": "", "value": 7.5 },
          "denoising_start": { "id": "f7c24c51-496f-44c4-836a-c734e529fec0", "name": "denoising_start", "type": "float", "fieldKind": "input", "label": "", "value": 0 },
          "denoising_end": { "id": "54f7656a-fb0d-4d9e-a459-f700f7dccd2e", "name": "denoising_end", "type": "float", "fieldKind": "input", "label": "", "value": 1 },
          "scheduler": { "id": "363ee440-040d-499b-bf84-bf5391b08681", "name": "scheduler", "type": "Scheduler", "fieldKind": "input", "label": "", "value": "euler" },
          "unet": { "id": "5c93d4e5-1064-4700-ab1d-d12e1e9b5ba7", "name": "unet", "type": "UNetField", "fieldKind": "input", "label": "" },
          "control": { "id": "e1948eb3-7407-43b0-93e3-139470f186b7", "name": "control", "type": "ControlPolymorphic", "fieldKind": "input", "label": "" },
          "ip_adapter": { "id": "5675b2c3-adfb-49ee-b33c-26bdbfab1fed", "name": "ip_adapter", "type": "IPAdapterPolymorphic", "fieldKind": "input", "label": "" },
          "t2i_adapter": { "id": "89cd4ab3-3bfc-4063-9de5-91d42305c651", "name": "t2i_adapter", "type": "T2IAdapterPolymorphic", "fieldKind": "input", "label": "" },
          "latents": { "id": "ec01df90-5042-418d-b6d6-86b251c13770", "name": "latents", "type": "LatentsField", "fieldKind": "input", "label": "" },
          "denoise_mask": { "id": "561cde00-cb20-42ae-9bd3-4f477f73fbe1", "name": "denoise_mask", "type": "DenoiseMaskField", "fieldKind": "input", "label": "" }
        },
        "outputs": {
          "latents": { "id": "f9addefe-efcc-4e01-8945-6ebbc934b002", "name": "latents", "type": "LatentsField", "fieldKind": "output" },
          "width": { "id": "6d48f78b-d681-422a-8677-0111bd0625f1", "name": "width", "type": "integer", "fieldKind": "output" },
          "height": { "id": "f25997b8-6316-44ce-b696-b82e4ed51ae5", "name": "height", "type": "integer", "fieldKind": "output" }
        },
        "label": "",
        "isOpen": true,
        "notes": "",
        "embedWorkflow": false,
        "isIntermediate": true,
        "useCache": true,
        "version": "1.4.0"
      },
      "width": 320,
      "height": 646,
      "position": { "x": 1597.9598293300219, "y": 1420.4637727891632 }
    },
    {
      "id": "59349822-af20-4e0e-a53f-3ba135d00c3f",
      "type": "invocation",
      "data": {
        "id": "59349822-af20-4e0e-a53f-3ba135d00c3f",
        "type": "rand_int",
        "inputs": {
          "low": { "id": "051f22f9-2d4f-414f-bc51-84af2d626efa", "name": "low", "type": "integer", "fieldKind": "input", "label": "", "value": 0 },
          "high": { "id": "77206186-f264-4224-9589-f925cf903dc9", "name": "high", "type": "integer", "fieldKind": "input", "label": "", "value": 2147483647 }
        },
        "outputs": {
          "value": { "id": "a7ed9387-3a24-4d34-b7c5-f713bd544ab1", "name": "value", "type": "integer", "fieldKind": "output" }
        },
        "label": "",
        "isOpen": false,
        "notes": "",
        "embedWorkflow": false,
        "isIntermediate": true,
        "useCache": false,
        "version": "1.0.0"
      },
      "width": 320,
      "height": 32,
      "position": { "x": 1178.16746986153, "y": 1663.9433412808876 }
    }
  ],
  "edges": [
    { "source": "59349822-af20-4e0e-a53f-3ba135d00c3f", "target": "280fd8a7-3b0c-49fe-8be4-6246e08b6c9a", "id": "59349822-af20-4e0e-a53f-3ba135d00c3f-280fd8a7-3b0c-49fe-8be4-6246e08b6c9a-collapsed", "type": "collapsed" },
    { "source": "610384f1-6f0c-4847-a9a2-37ce7f456ed1", "sourceHandle": "clip", "target": "aca3b054-bfba-4392-bd20-6476f59504df", "targetHandle": "clip", "id": "reactflow__edge-610384f1-6f0c-4847-a9a2-37ce7f456ed1clip-aca3b054-bfba-4392-bd20-6476f59504dfclip", "type": "default" },
    { "source": "610384f1-6f0c-4847-a9a2-37ce7f456ed1", "sourceHandle": "clip", "target": "3db7cee0-31e2-4a3d-94a1-268cb16177dd", "targetHandle": "clip", "id": "reactflow__edge-610384f1-6f0c-4847-a9a2-37ce7f456ed1clip-3db7cee0-31e2-4a3d-94a1-268cb16177ddclip", "type": "default" },
    { "source": "a6cc0986-f928-4a7e-8d44-ba2d4b36f54a", "sourceHandle": "image", "target": "2ac03cf6-0326-454a-bed0-d8baef2bf30d", "targetHandle": "image", "id": "reactflow__edge-a6cc0986-f928-4a7e-8d44-ba2d4b36f54aimage-2ac03cf6-0326-454a-bed0-d8baef2bf30dimage", "type": "default" },
    { "source": "610384f1-6f0c-4847-a9a2-37ce7f456ed1", "sourceHandle": "vae", "target": "28542b66-5a00-4780-a318-0a036d2df914", "targetHandle": "vae", "id": "reactflow__edge-610384f1-6f0c-4847-a9a2-37ce7f456ed1vae-28542b66-5a00-4780-a318-0a036d2df914vae", "type": "default" },
    { "source": "280fd8a7-3b0c-49fe-8be4-6246e08b6c9a",
|
"sourceHandle": "noise",
|
||||||
|
"target": "9755ae4c-ef30-4db3-80f6-a31f98979a11",
|
||||||
|
"targetHandle": "noise",
|
||||||
|
"id": "reactflow__edge-280fd8a7-3b0c-49fe-8be4-6246e08b6c9anoise-9755ae4c-ef30-4db3-80f6-a31f98979a11noise",
|
||||||
|
"type": "default"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"source": "3db7cee0-31e2-4a3d-94a1-268cb16177dd",
|
||||||
|
"sourceHandle": "conditioning",
|
||||||
|
"target": "9755ae4c-ef30-4db3-80f6-a31f98979a11",
|
||||||
|
"targetHandle": "negative_conditioning",
|
||||||
|
"id": "reactflow__edge-3db7cee0-31e2-4a3d-94a1-268cb16177ddconditioning-9755ae4c-ef30-4db3-80f6-a31f98979a11negative_conditioning",
|
||||||
|
"type": "default"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"source": "aca3b054-bfba-4392-bd20-6476f59504df",
|
||||||
|
"sourceHandle": "conditioning",
|
||||||
|
"target": "9755ae4c-ef30-4db3-80f6-a31f98979a11",
|
||||||
|
"targetHandle": "positive_conditioning",
|
||||||
|
"id": "reactflow__edge-aca3b054-bfba-4392-bd20-6476f59504dfconditioning-9755ae4c-ef30-4db3-80f6-a31f98979a11positive_conditioning",
|
||||||
|
"type": "default"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"source": "610384f1-6f0c-4847-a9a2-37ce7f456ed1",
|
||||||
|
"sourceHandle": "unet",
|
||||||
|
"target": "9755ae4c-ef30-4db3-80f6-a31f98979a11",
|
||||||
|
"targetHandle": "unet",
|
||||||
|
"id": "reactflow__edge-610384f1-6f0c-4847-a9a2-37ce7f456ed1unet-9755ae4c-ef30-4db3-80f6-a31f98979a11unet",
|
||||||
|
"type": "default"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"source": "2ac03cf6-0326-454a-bed0-d8baef2bf30d",
|
||||||
|
"sourceHandle": "control",
|
||||||
|
"target": "9755ae4c-ef30-4db3-80f6-a31f98979a11",
|
||||||
|
"targetHandle": "control",
|
||||||
|
"id": "reactflow__edge-2ac03cf6-0326-454a-bed0-d8baef2bf30dcontrol-9755ae4c-ef30-4db3-80f6-a31f98979a11control",
|
||||||
|
"type": "default"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"source": "9755ae4c-ef30-4db3-80f6-a31f98979a11",
|
||||||
|
"sourceHandle": "latents",
|
||||||
|
"target": "28542b66-5a00-4780-a318-0a036d2df914",
|
||||||
|
"targetHandle": "latents",
|
||||||
|
"id": "reactflow__edge-9755ae4c-ef30-4db3-80f6-a31f98979a11latents-28542b66-5a00-4780-a318-0a036d2df914latents",
|
||||||
|
"type": "default"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"source": "59349822-af20-4e0e-a53f-3ba135d00c3f",
|
||||||
|
"sourceHandle": "value",
|
||||||
|
"target": "280fd8a7-3b0c-49fe-8be4-6246e08b6c9a",
|
||||||
|
"targetHandle": "seed",
|
||||||
|
"id": "reactflow__edge-59349822-af20-4e0e-a53f-3ba135d00c3fvalue-280fd8a7-3b0c-49fe-8be4-6246e08b6c9aseed",
|
||||||
|
"type": "default"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
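The workflow fragment above wires a rand_int node into the seed of a noise node, which in turn feeds the denoise node. A minimal sketch of walking that graph in Python, assuming the fragment belongs to a complete workflow file (saved here as workflow.json) with top-level "nodes" and "edges" arrays, as the closing brackets suggest:

import json

# Load the workflow and index its nodes by id.
with open("workflow.json") as f:
    workflow = json.load(f)

nodes = {node["id"]: node for node in workflow["nodes"]}

# Print which node output feeds which node input.
for edge in workflow["edges"]:
    if edge["type"] != "default":
        continue  # "collapsed" edges are UI state only and carry no data
    src = nodes[edge["source"]]["data"]["type"]
    dst = nodes[edge["target"]]["data"]["type"]
    print(f'{src}.{edge["sourceHandle"]} -> {dst}.{edge["targetHandle"]}')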
@@ -2,56 +2,60 @@

 set -e

-cd "$(dirname "$0")"
+BCYAN="\e[1;36m"
+BYELLOW="\e[1;33m"
+BGREEN="\e[1;32m"
+BRED="\e[1;31m"
+RED="\e[31m"
+RESET="\e[0m"
+
+function is_bin_in_path {
+    builtin type -P "$1" &>/dev/null
+}
+
+function git_show {
+    git show -s --format=oneline --abbrev-commit "$1" | cat
+}

 if [[ -v "VIRTUAL_ENV" ]]; then
     # we can't just call 'deactivate' because this function is not exported
     # to the environment of this script from the bash process that runs the script
-    echo "A virtual environment is activated. Please deactivate it before proceeding".
+    echo -e "${BRED}A virtual environment is activated. Please deactivate it before proceeding.${RESET}"
     exit -1
 fi

-VERSION=$(cd ..; python -c "from invokeai.version import __version__ as version; print(version)")
+cd "$(dirname "$0")"
+
+echo
+echo -e "${BYELLOW}This script must be run from the installer directory!${RESET}"
+echo "The current working directory is $(pwd)"
+read -p "If that looks right, press any key to proceed, or CTRL-C to exit..."
+echo
+
+# Some machines only have `python3` in PATH, others have `python` - make an alias.
+# We can use a function to approximate an alias within a non-interactive shell.
+if ! is_bin_in_path python && is_bin_in_path python3; then
+    function python {
+        python3 "$@"
+    }
+fi
+
+VERSION=$(
+    cd ..
+    python -c "from invokeai.version import __version__ as version; print(version)"
+)
 PATCH=""
 VERSION="v${VERSION}${PATCH}"
-LATEST_TAG="v3-latest"

-echo Building installer for version $VERSION
-echo "Be certain that you're in the 'installer' directory before continuing."
-read -p "Press any key to continue, or CTRL-C to exit..."
-
-read -e -p "Tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
-RESPONSE=${input:='n'}
-if [ "$RESPONSE" == 'y' ]; then
-
-    git push origin :refs/tags/$VERSION
-    if ! git tag -fa $VERSION ; then
-        echo "Existing/invalid tag"
-        exit -1
-    fi
-
-    git push origin :refs/tags/$LATEST_TAG
-    git tag -fa $LATEST_TAG
-
-    echo "remember to push --tags!"
-fi
+echo -e "${BGREEN}HEAD${RESET}:"
+git_show HEAD
+echo

 # ----------------------

-echo Building the wheel
-
-# install the 'build' package in the user site packages, if needed
-# could be improved by using a temporary venv, but it's tiny and harmless
-if [[ $(python -c 'from importlib.util import find_spec; print(find_spec("build") is None)') == "True" ]]; then
-    pip install --user build
-fi
-
-rm -r ../build
-python -m build --wheel --outdir dist/ ../.
-
-# ----------------------
-
-echo Building installer zip fles for InvokeAI $VERSION
+echo
+echo "Building installer zip files for InvokeAI ${VERSION}..."
+echo

 # get rid of any old ones
 rm -f *.zip
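The is_bin_in_path helper and the python-as-python3 shim added above are bash; a rough Python analogue of the same "prefer python, fall back to python3" check, for illustration only:

import shutil

# shutil.which mirrors `builtin type -P`: it returns the executable's
# path if the name resolves in PATH, else None.
python_cmd = "python" if shutil.which("python") else "python3"
print(f"would invoke: {python_cmd}")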
@@ -59,12 +63,11 @@ rm -rf InvokeAI-Installer

 # copy content
 mkdir InvokeAI-Installer
-for f in templates lib *.txt *.reg; do
+for f in templates *.txt *.reg; do
     cp -r ${f} InvokeAI-Installer/
 done

-# Move the wheel
-mv dist/*.whl InvokeAI-Installer/lib/
+mkdir InvokeAI-Installer/lib
+cp lib/*.py InvokeAI-Installer/lib

 # Install scripts
 # Mac/Linux
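A rough Python rendering of the copy step in this hunk (the real script is bash, and this sketch assumes it is run from the installer directory): lib/ is no longer copied wholesale with a pre-built wheel inside; instead the directory is created fresh and only its .py sources are shipped.

import shutil
from pathlib import Path

dest = Path("InvokeAI-Installer")
dest.mkdir(exist_ok=True)

# templates/, *.txt and *.reg are copied as before
shutil.copytree("templates", dest / "templates", dirs_exist_ok=True)
for pattern in ("*.txt", "*.reg"):
    for item in Path(".").glob(pattern):
        shutil.copy(item, dest / item.name)

# lib/ now gets only the Python sources, no wheel
(dest / "lib").mkdir(exist_ok=True)
for py in Path("lib").glob("*.py"):
    shutil.copy(py, dest / "lib" / py.name)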
@@ -72,13 +75,13 @@ cp install.sh.in InvokeAI-Installer/install.sh
 chmod a+x InvokeAI-Installer/install.sh

 # Windows
-perl -p -e "s/^set INVOKEAI_VERSION=.*/set INVOKEAI_VERSION=$VERSION/" install.bat.in > InvokeAI-Installer/install.bat
+cp install.bat.in InvokeAI-Installer/install.bat
 cp WinLongPathsEnabled.reg InvokeAI-Installer/

 # Zip everything up
 zip -r InvokeAI-installer-$VERSION.zip InvokeAI-Installer

 # clean up
-rm -rf InvokeAI-Installer tmp dist
+rm -rf InvokeAI-Installer tmp dist ../invokeai/frontend/web/dist/

 exit 0
@@ -1,7 +1,7 @@
 @echo off
 setlocal EnableExtensions EnableDelayedExpansion

-@rem This script requires the user to install Python 3.9 or higher. All other
+@rem This script requires the user to install Python 3.10 or higher. All other
 @rem requirements are downloaded as needed.

 @rem change to the script's directory
@@ -15,11 +15,10 @@ if "%1" == "use-cache" (
 @rem Config
 @rem The version in the next line is replaced by an up to date release number
 @rem when create_installer.sh is run. Change the release number there.
-set INVOKEAI_VERSION=latest
 set INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
 set TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting
 set PYTHON_URL=https://www.python.org/downloads/windows/
-set MINIMUM_PYTHON_VERSION=3.9.0
+set MINIMUM_PYTHON_VERSION=3.10.0
 set PYTHON_URL=https://www.python.org/downloads/release/python-3109/

 set err_msg=An error has occurred and the script could not continue.
@@ -28,8 +27,7 @@ set err_msg=An error has occurred and the script could not continue.
 echo This script will install InvokeAI and its dependencies.
 echo.
 echo BEFORE YOU START PLEASE MAKE SURE TO DO THE FOLLOWING
-echo 1. Install python 3.9 or 3.10. Python version 3.11 and above are
-echo    not supported at the moment.
+echo 1. Install python 3.10 or 3.11. Python version 3.9 is no longer supported.
 echo 2. Double-click on the file WinLongPathsEnabled.reg in order to
 echo    enable long path support on your system.
 echo 3. Install the Visual C++ core libraries.
@@ -46,19 +44,19 @@ echo ***** Checking and Updating Python *****

 call python --version >.tmp1 2>.tmp2
 if %errorlevel% == 1 (
-    set err_msg=Please install Python 3.10. See %INSTRUCTIONS% for details.
+    set err_msg=Please install Python 3.10-11. See %INSTRUCTIONS% for details.
     goto err_exit
 )

 for /f "tokens=2" %%i in (.tmp1) do set python_version=%%i
 if "%python_version%" == "" (
-    set err_msg=No python was detected on your system. Please install Python version %MINIMUM_PYTHON_VERSION% or higher. We recommend Python 3.10.9 from %PYTHON_URL%
+    set err_msg=No python was detected on your system. Please install Python version %MINIMUM_PYTHON_VERSION% or higher. We recommend Python 3.10.12 from %PYTHON_URL%
     goto err_exit
 )

 call :compareVersions %MINIMUM_PYTHON_VERSION% %python_version%
 if %errorlevel% == 1 (
-    set err_msg=Your version of Python is too low. You need at least %MINIMUM_PYTHON_VERSION% but you have %python_version%. We recommend Python 3.10.9 from %PYTHON_URL%
+    set err_msg=Your version of Python is too low. You need at least %MINIMUM_PYTHON_VERSION% but you have %python_version%. We recommend Python 3.10.12 from %PYTHON_URL%
     goto err_exit
 )
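The batch file gates on MINIMUM_PYTHON_VERSION via its :compareVersions routine; the same check is easier to see as tuples, sketched here in Python:

# Tuple comparison gives the numeric, per-component ordering that
# :compareVersions implements by hand in batch.
MINIMUM_PYTHON_VERSION = (3, 10, 0)

def parse_version(s: str) -> tuple[int, ...]:
    return tuple(int(part) for part in s.split("."))

assert parse_version("3.10.12") >= MINIMUM_PYTHON_VERSION
assert not (parse_version("3.9.18") >= MINIMUM_PYTHON_VERSION)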
@@ -8,10 +8,10 @@ cd $scriptdir

 function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }

-MINIMUM_PYTHON_VERSION=3.9.0
+MINIMUM_PYTHON_VERSION=3.10.0
 MAXIMUM_PYTHON_VERSION=3.11.100
 PYTHON=""
-for candidate in python3.11 python3.10 python3.9 python3 python ; do
+for candidate in python3.11 python3.10 python3 python ; do
     if ppath=`which $candidate`; then
         # when using `pyenv`, the executable for an inactive Python version will exist but will not be operational
         # we check that this found executable can actually run
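The shell `version` function above packs a dotted version into a single integer (printf "%d%03d%03d%03d"), so 3.10.0 becomes 3010000000 and a plain numeric comparison works. A sketch of the same packing in Python:

def version_key(v: str) -> int:
    # pad to four components, then zero-fill each trailing component to 3 digits
    parts = (v.split(".") + ["0", "0", "0"])[:4]
    return int(parts[0] + "".join(p.zfill(3) for p in parts[1:]))

assert version_key("3.10.0") == 3010000000
assert version_key("3.10.0") > version_key("3.9.0")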
@@ -11,9 +11,9 @@ import sys
 import venv
 from pathlib import Path
 from tempfile import TemporaryDirectory
-from typing import Union
+from typing import Optional, Tuple

-SUPPORTED_PYTHON = ">=3.9.0,<=3.11.100"
+SUPPORTED_PYTHON = ">=3.10.0,<=3.11.100"
 INSTALLER_REQS = ["rich", "semver", "requests", "plumbum", "prompt-toolkit"]
 BOOTSTRAP_VENV_PREFIX = "invokeai-installer-tmp"

@@ -21,40 +21,20 @@ OS = platform.uname().system
 ARCH = platform.uname().machine
 VERSION = "latest"

-### Feature flags
-# Install the virtualenv into the runtime dir
-FF_VENV_IN_RUNTIME = True
-
-# Install the wheel packaged with the installer
-FF_USE_LOCAL_WHEEL = True
-

 class Installer:
     """
     Deploys an InvokeAI installation into a given path
     """

+    reqs: list[str] = INSTALLER_REQS
+
     def __init__(self) -> None:
-        self.reqs = INSTALLER_REQS
-        self.preflight()
         if os.getenv("VIRTUAL_ENV") is not None:
             print("A virtual environment is already activated. Please 'deactivate' before installation.")
             sys.exit(-1)
         self.bootstrap()
+        self.available_releases = get_github_releases()

-    def preflight(self) -> None:
-        """
-        Preflight checks
-        """
-
-        # TODO
-        # verify python version
-        # on macOS verify XCode tools are present
-        # verify libmesa, libglx on linux
-        # check that the system arch is not i386 (?)
-        # check that the system has a GPU, and the type of GPU
-
-        pass
-
     def mktemp_venv(self) -> TemporaryDirectory:
         """
@@ -67,7 +47,6 @@ class Installer:
         # Cleaning up temporary directories on Windows results in a race condition
         # and a stack trace.
         # `ignore_cleanup_errors` was only added in Python 3.10
-        # users of Python 3.9 will see a gnarly stack trace on installer exit
         if OS == "Windows" and int(platform.python_version_tuple()[1]) >= 10:
             venv_dir = TemporaryDirectory(prefix=BOOTSTRAP_VENV_PREFIX, ignore_cleanup_errors=True)
         else:
@@ -79,12 +58,9 @@ class Installer:

         return venv_dir

-    def bootstrap(self, verbose: bool = False) -> TemporaryDirectory:
+    def bootstrap(self, verbose: bool = False) -> TemporaryDirectory | None:
         """
         Bootstrap the installer venv with packages required at install time
-
-        :return: path to the virtual environment directory that was bootstrapped
-        :rtype: TemporaryDirectory
         """

         print("Initializing the installer. This may take a minute - please wait...")
@@ -96,39 +72,27 @@ class Installer:
         cmd.extend(self.reqs)

         try:
-            res = subprocess.check_output(cmd).decode()
+            # upgrade pip to the latest version to avoid a confusing message
+            res = upgrade_pip(Path(venv_dir.name))
             if verbose:
                 print(res)
+
+            # run the install prerequisites installation
+            res = subprocess.check_output(cmd).decode()
+
+            if verbose:
+                print(res)
+
             return venv_dir
         except subprocess.CalledProcessError as e:
             print(e)

-    def app_venv(self, path: str = None):
+    def app_venv(self, venv_parent) -> Path:
         """
         Create a virtualenv for the InvokeAI installation
         """

-        # explicit venv location
-        # currently unused in normal operation
-        # useful for testing or special cases
-        if path is not None:
-            venv_dir = Path(path)
-
-        # experimental / testing
-        elif not FF_VENV_IN_RUNTIME:
-            if OS == "Windows":
-                venv_dir_parent = os.getenv("APPDATA", "~/AppData/Roaming")
-            elif OS == "Darwin":
-                # there is no environment variable on macOS to find this
-                # TODO: confirm this is working as expected
-                venv_dir_parent = "~/Library/Application Support"
-            elif OS == "Linux":
-                venv_dir_parent = os.getenv("XDG_DATA_DIR", "~/.local/share")
-            venv_dir = Path(venv_dir_parent).expanduser().resolve() / f"InvokeAI/{VERSION}/venv"
-
-        # stable / current
-        else:
-            venv_dir = self.dest / ".venv"
+        venv_dir = venv_parent / ".venv"

         # Prefer to copy python executables
         # so that updates to system python don't break InvokeAI
@@ -139,17 +103,10 @@ class Installer:
         except shutil.SameFileError:
             venv.create(venv_dir, with_pip=True, symlinks=True)

-        # upgrade pip in Python 3.9 environments
-        if int(platform.python_version_tuple()[1]) == 9:
-            from plumbum import FG, local
-
-            pip = local[get_pip_from_venv(venv_dir)]
-            pip["install", "--upgrade", "pip"] & FG
-
         return venv_dir

     def install(
-        self, root: str = "~/invokeai", version: str = "latest", yes_to_all=False, find_links: Path = None
+        self, version=None, root: str = "~/invokeai", yes_to_all=False, find_links: Optional[Path] = None
     ) -> None:
         """
         Install the InvokeAI application into the given runtime path
@@ -166,15 +123,20 @@ class Installer:

         import messages

-        messages.welcome()
+        messages.welcome(self.available_releases)

-        default_path = os.environ.get("INVOKEAI_ROOT") or Path(root).expanduser().resolve()
-        self.dest = default_path if yes_to_all else messages.dest_path(root)
+        version = messages.choose_version(self.available_releases)
+
+        auto_dest = Path(os.environ.get("INVOKEAI_ROOT", root)).expanduser().resolve()
+        destination = auto_dest if yes_to_all else messages.dest_path(root)
+        if destination is None:
+            print("Could not find or create the destination directory. Installation cancelled.")
+            sys.exit(0)

         # create the venv for the app
-        self.venv = self.app_venv()
+        self.venv = self.app_venv(venv_parent=destination)

-        self.instance = InvokeAiInstance(runtime=self.dest, venv=self.venv, version=version)
+        self.instance = InvokeAiInstance(runtime=destination, venv=self.venv, version=version)

         # install dependencies and the InvokeAI application
         (extra_index_url, optional_modules) = get_torch_source() if not yes_to_all else (None, None)
@@ -198,7 +160,7 @@ class InvokeAiInstance:
     A single runtime directory *may* be shared by multiple virtual environments, though this isn't currently tested or supported.
     """

-    def __init__(self, runtime: Path, venv: Path, version: str) -> None:
+    def __init__(self, runtime: Path, venv: Path, version: str = "stable") -> None:
         self.runtime = runtime
         self.venv = venv
         self.pip = get_pip_from_venv(venv)
@@ -207,6 +169,7 @@ class InvokeAiInstance:
         set_sys_path(venv)
         os.environ["INVOKEAI_ROOT"] = str(self.runtime.expanduser().resolve())
         os.environ["VIRTUAL_ENV"] = str(self.venv.expanduser().resolve())
+        upgrade_pip(venv)

     def get(self) -> tuple[Path, Path]:
         """
@@ -220,54 +183,7 @@ class InvokeAiInstance:

     def install(self, extra_index_url=None, optional_modules=None, find_links=None):
         """
-        Install this instance, including dependencies and the app itself
-
-        :param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
-        :type extra_index_url: str
-        """
-
-        import messages
-
-        # install torch first to ensure the correct version gets installed.
-        # works with either source or wheel install with negligible impact on installation times.
-        messages.simple_banner("Installing PyTorch :fire:")
-        self.install_torch(extra_index_url, find_links)
-
-        messages.simple_banner("Installing the InvokeAI Application :art:")
-        self.install_app(extra_index_url, optional_modules, find_links)
-
-    def install_torch(self, extra_index_url=None, find_links=None):
-        """
-        Install PyTorch
-        """
-
-        from plumbum import FG, local
-
-        pip = local[self.pip]
-
-        (
-            pip[
-                "install",
-                "--require-virtualenv",
-                "numpy~=1.24.0",  # choose versions that won't be uninstalled during phase 2
-                "urllib3~=1.26.0",
-                "requests~=2.28.0",
-                "torch~=2.0.0",
-                "torchmetrics==0.11.4",
-                "torchvision>=0.14.1",
-                "--force-reinstall",
-                "--find-links" if find_links is not None else None,
-                find_links,
-                "--extra-index-url" if extra_index_url is not None else None,
-                extra_index_url,
-            ]
-            & FG
-        )
-
-    def install_app(self, extra_index_url=None, optional_modules=None, find_links=None):
-        """
-        Install the application with pip.
-        Supports installation from PyPi or from a local source directory.
+        Install the package from PyPi.

         :param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
         :type extra_index_url: str
@@ -279,53 +195,52 @@ class InvokeAiInstance:
     :type find_links: Path
         """

-        ## this only applies to pypi installs; TODO actually use this
-        if self.version == "pre":
+        import messages
+
+        # not currently used, but may be useful for "install most recent version" option
+        if self.version == "prerelease":
             version = None
-            pre = "--pre"
+            pre_flag = "--pre"
+        elif self.version == "stable":
+            version = None
+            pre_flag = None
         else:
             version = self.version
-            pre = None
+            pre_flag = None

-        ## TODO: only local wheel will be installed as of now; support for --version arg is TODO
-        if FF_USE_LOCAL_WHEEL:
-            # if no wheel, try to do a source install before giving up
-            try:
-                src = str(next(Path(__file__).parent.glob("InvokeAI-*.whl")))
-            except StopIteration:
-                try:
-                    src = Path(__file__).parents[1].expanduser().resolve()
-                    # if the above directory contains one of these files, we'll do a source install
-                    next(src.glob("pyproject.toml"))
-                    next(src.glob("invokeai"))
-                except StopIteration:
-                    print("Unable to find a wheel or perform a source install. Giving up.")
-
-        elif version == "source":
-            # this makes an assumption about the location of the installer package in the source tree
-            src = Path(__file__).parents[1].expanduser().resolve()
-        else:
-            # will install from PyPi
-            src = f"invokeai=={version}" if version is not None else "invokeai"
+        src = "invokeai"
+        if optional_modules:
+            src += optional_modules
+        if version:
+            src += f"=={version}"
+
+        messages.simple_banner("Installing the InvokeAI Application :art:")

-        from plumbum import FG, local
+        from plumbum import FG, ProcessExecutionError, local  # type: ignore

         pip = local[self.pip]

-        (
-            pip[
-                "install",
-                "--require-virtualenv",
-                "--use-pep517",
-                str(src) + (optional_modules if optional_modules else ""),
-                "--find-links" if find_links is not None else None,
-                find_links,
-                "--extra-index-url" if extra_index_url is not None else None,
-                extra_index_url,
-                pre,
-            ]
-            & FG
-        )
+        pipeline = pip[
+            "install",
+            "--require-virtualenv",
+            "--force-reinstall",
+            "--use-pep517",
+            str(src),
+            "--find-links" if find_links is not None else None,
+            find_links,
+            "--extra-index-url" if extra_index_url is not None else None,
+            extra_index_url,
+            pre_flag,
+        ]
+
+        try:
+            _ = pipeline & FG
+        except ProcessExecutionError as e:
+            print(f"Error: {e}")
+            print(
+                "Could not install InvokeAI. Please try downloading the latest version of the installer and install again."
+            )
+            sys.exit(1)

     def configure(self):
         """
@@ -381,7 +296,6 @@ class InvokeAiInstance:

         ext = "bat" if OS == "Windows" else "sh"

-        # scripts = ['invoke', 'update']
         scripts = ["invoke"]

         for script in scripts:
@@ -416,6 +330,23 @@ def get_pip_from_venv(venv_path: Path) -> str:
     return str(venv_path.expanduser().resolve() / pip)


+def upgrade_pip(venv_path: Path) -> str | None:
+    """
+    Upgrade the pip executable in the given virtual environment
+    """
+
+    python = "Scripts\python.exe" if OS == "Windows" else "bin/python"
+    python = str(venv_path.expanduser().resolve() / python)
+
+    try:
+        result = subprocess.check_output([python, "-m", "pip", "install", "--upgrade", "pip"]).decode()
+    except subprocess.CalledProcessError as e:
+        print(e)
+        result = None
+
+    return result
+
+
 def set_sys_path(venv_path: Path) -> None:
     """
     Given a path to a virtual environment, set the sys.path, in a cross-platform fashion,
@@ -439,7 +370,43 @@ def set_sys_path(venv_path: Path) -> None:
     sys.path.append(str(Path(venv_path, lib, "site-packages").expanduser().resolve()))


+def get_github_releases() -> tuple[list, list] | None:
+    """
+    Query Github for published (pre-)release versions.
+    Return a tuple where the first element is a list of stable releases and the second element is a list of pre-releases.
+    Return None if the query fails for any reason.
+    """
+
+    import requests
+
+    ## get latest releases using github api
+    url = "https://api.github.com/repos/invoke-ai/InvokeAI/releases"
+    releases, pre_releases = [], []
+    try:
+        res = requests.get(url)
+        res.raise_for_status()
+        tag_info = res.json()
+        for tag in tag_info:
+            if not tag["prerelease"]:
+                releases.append(tag["tag_name"].lstrip("v"))
+            else:
+                pre_releases.append(tag["tag_name"].lstrip("v"))
+    except requests.HTTPError as e:
+        print(f"Error: {e}")
+        print("Could not fetch version information from GitHub. Please check your network connection and try again.")
+        return
+    except Exception as e:
+        print(f"Error: {e}")
+        print("An unexpected error occurred while trying to fetch version information from GitHub. Please try again.")
+        return
+
+    releases.sort(reverse=True)
+    pre_releases.sort(reverse=True)
+
+    return releases, pre_releases
+
+
-def get_torch_source() -> (Union[str, None], str):
+def get_torch_source() -> Tuple[str | None, str | None]:
     """
     Determine the extra index URL for pip to use for torch installation.
     This depends on the OS and the graphics accelerator in use.
@@ -454,24 +421,25 @@ def get_torch_source() -> (Union[str, None], str):
     :rtype: list
     """

-    from messages import graphical_accelerator
+    from messages import select_gpu

-    # device can be one of: "cuda", "rocm", "cpu", "idk"
-    device = graphical_accelerator()
+    # device can be one of: "cuda", "rocm", "cpu", "cuda_and_dml, autodetect"
+    device = select_gpu()

     url = None
     optional_modules = "[onnx]"
     if OS == "Linux":
-        if device == "rocm":
-            url = "https://download.pytorch.org/whl/rocm5.4.2"
-        elif device == "cpu":
+        if device.value == "rocm":
+            url = "https://download.pytorch.org/whl/rocm5.6"
+        elif device.value == "cpu":
             url = "https://download.pytorch.org/whl/cpu"

-    if device == "cuda":
-        url = "https://download.pytorch.org/whl/cu118"
-        optional_modules = "[xformers,onnx-cuda]"
-    if device == "cuda_and_dml":
-        url = "https://download.pytorch.org/whl/cu118"
-        optional_modules = "[xformers,onnx-directml]"
+    elif OS == "Windows":
+        if device.value == "cuda":
+            url = "https://download.pytorch.org/whl/cu121"
+            optional_modules = "[xformers,onnx-cuda]"
+        if device.value == "cuda_and_dml":
+            url = "https://download.pytorch.org/whl/cu121"
+            optional_modules = "[xformers,onnx-directml]"

     # in all other cases, Torch wheels should be coming from PyPi as of Torch 1.13
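One subtlety in get_github_releases() above: list.sort(reverse=True) orders the tag strings lexicographically, so "3.9.0" would sort above "3.10.0". A sketch of semantic ordering using the semver package (already pinned in INSTALLER_REQS; this assumes the semver 3.x API and tags that parse as semver):

import semver

versions = ["3.2.0", "3.10.0", "3.9.0"]
# semver.Version objects compare component-wise, not character-wise
versions.sort(key=semver.Version.parse, reverse=True)
print(versions)  # ['3.10.0', '3.9.0', '3.2.0']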
@@ -5,10 +5,11 @@ Installer user interaction

 import os
 import platform
+from enum import Enum
 from pathlib import Path

 from prompt_toolkit import HTML, prompt
-from prompt_toolkit.completion import PathCompleter
+from prompt_toolkit.completion import FuzzyWordCompleter, PathCompleter
 from prompt_toolkit.validation import Validator
 from rich import box, print
 from rich.console import Console, Group, group
@@ -35,16 +36,26 @@ else:
     console = Console(style=Style(color="grey74", bgcolor="grey19"))


-def welcome():
+def welcome(available_releases: tuple | None = None) -> None:
     @group()
     def text():
-        if (platform_specific := _platform_specific_help()) != "":
+        if (platform_specific := _platform_specific_help()) is not None:
             yield platform_specific
             yield ""
         yield Text.from_markup(
             "Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with [i]Control-C[/] and retry.",
             justify="center",
         )
+        if available_releases is not None:
+            latest_stable = available_releases[0][0]
+            last_pre = available_releases[1][0]
+            yield ""
+            yield Text.from_markup(
+                f"[red3]🠶[/] Latest stable release (recommended): [b bright_white]{latest_stable}", justify="center"
+            )
+            yield Text.from_markup(
+                f"[red3]🠶[/] Last published pre-release version: [b bright_white]{last_pre}", justify="center"
+            )

     console.rule()
     print(
@@ -61,19 +72,30 @@ def welcome():
     console.line()


-def confirm_install(dest: Path) -> bool:
-    if dest.exists():
-        print(f":exclamation: Directory {dest} already exists :exclamation:")
-        dest_confirmed = Confirm.ask(
-            ":stop_sign: (re)install in this location?",
-            default=False,
-        )
-    else:
-        print(f"InvokeAI will be installed in {dest}")
-        dest_confirmed = Confirm.ask("Use this location?", default=True)
+def choose_version(available_releases: tuple | None = None) -> str:
+    """
+    Prompt the user to choose an Invoke version to install
+    """
+
+    # short circuit if we couldn't get a version list
+    # still try to install the latest stable version
+    if available_releases is None:
+        return "stable"
+
+    console.print(":grey_question: [orange3]Please choose an Invoke version to install.")
+
+    choices = available_releases[0] + available_releases[1]
+
+    response = prompt(
+        message=f"   <Enter> to install the recommended release ({choices[0]}). <Tab> or type to pick a version: ",
+        complete_while_typing=True,
+        completer=FuzzyWordCompleter(choices),
+    )
+    console.print(f"   Version {choices[0] if response == '' else response} will be installed.")
     console.line()

-    return dest_confirmed
+    return "stable" if response == "" else response


 def user_wants_auto_configuration() -> bool:
@@ -109,7 +131,23 @@ def user_wants_auto_configuration() -> bool:
     return choice.lower().startswith("a")


-def dest_path(dest=None) -> Path:
+def confirm_install(dest: Path) -> bool:
+    if dest.exists():
+        print(f":stop_sign: Directory {dest} already exists!")
+        print("   Is this location correct?")
+        default = False
+    else:
+        print(f":file_folder: InvokeAI will be installed in {dest}")
+        default = True
+
+    dest_confirmed = Confirm.ask("   Please confirm:", default=default)
+
+    console.line()
+
+    return dest_confirmed
+
+
+def dest_path(dest=None) -> Path | None:
     """
     Prompt the user for the destination path and create the path
@@ -124,37 +162,34 @@ def dest_path(dest=None) -> Path:
     else:
         dest = Path.cwd().expanduser().resolve()
     prev_dest = init_path = dest
-
-    dest_confirmed = confirm_install(dest)
+    dest_confirmed = False

     while not dest_confirmed:
-        # if the given destination already exists, the starting point for browsing is its parent directory.
-        # the user may have made a typo, or otherwise wants to place the root dir next to an existing one.
-        # if the destination dir does NOT exist, then the user must have changed their mind about the selection.
-        # since we can't read their mind, start browsing at Path.cwd().
-        browse_start = (prev_dest.parent if prev_dest.exists() else Path.cwd()).expanduser().resolve()
+        browse_start = (dest or Path.cwd()).expanduser().resolve()

         path_completer = PathCompleter(
             only_directories=True,
             expanduser=True,
-            get_paths=lambda: [browse_start],
+            get_paths=lambda: [str(browse_start)],  # noqa: B023
             # get_paths=lambda: [".."].extend(list(browse_start.iterdir()))
         )

         console.line()
-        console.print(f"[orange3]Please select the destination directory for the installation:[/] \[{browse_start}]: ")
+
+        console.print(f":grey_question: [orange3]Please select the install destination:[/] \[{browse_start}]: ")
         selected = prompt(
             ">>> ",
             complete_in_thread=True,
             completer=path_completer,
             default=str(browse_start) + os.sep,
             vi_mode=True,
-            complete_while_typing=True
+            complete_while_typing=True,
             # Test that this is not needed on Windows
             # complete_style=CompleteStyle.READLINE_LIKE,
         )
         prev_dest = dest
         dest = Path(selected)
+
         console.line()

         dest_confirmed = confirm_install(dest.expanduser().resolve())
@@ -182,41 +217,45 @@ def dest_path(dest=None) -> Path:
     console.rule("Goodbye!")


-def graphical_accelerator():
+class GpuType(Enum):
+    CUDA = "cuda"
+    CUDA_AND_DML = "cuda_and_dml"
+    ROCM = "rocm"
+    CPU = "cpu"
+    AUTODETECT = "autodetect"
+
+
+def select_gpu() -> GpuType:
     """
-    Prompt the user to select the graphical accelerator in their system
-    This does not validate user's choices (yet), but only offers choices
-    valid for the platform.
-    CUDA is the fallback.
-    We may be able to detect the GPU driver by shelling out to `modprobe` or `lspci`,
-    but this is not yet supported or reliable. Also, some users may have exotic preferences.
+    Prompt the user to select the GPU driver
     """

     if ARCH == "arm64" and OS != "Darwin":
         print(f"Only CPU acceleration is available on {ARCH} architecture. Proceeding with that.")
-        return "cpu"
+        return GpuType.CPU

     nvidia = (
         "an [gold1 b]NVIDIA[/] GPU (using CUDA™)",
-        "cuda",
+        GpuType.CUDA,
     )
     nvidia_with_dml = (
         "an [gold1 b]NVIDIA[/] GPU (using CUDA™, and DirectML™ for ONNX) -- ALPHA",
-        "cuda_and_dml",
+        GpuType.CUDA_AND_DML,
     )
     amd = (
         "an [gold1 b]AMD[/] GPU (using ROCm™)",
-        "rocm",
+        GpuType.ROCM,
     )
     cpu = (
-        "no compatible GPU, or specifically prefer to use the CPU",
-        "cpu",
+        "Do not install any GPU support, use CPU for generation (slow)",
+        GpuType.CPU,
     )
-    idk = (
+    autodetect = (
         "I'm not sure what to choose",
-        "idk",
+        GpuType.AUTODETECT,
     )

+    options = []
     if OS == "Windows":
         options = [nvidia, nvidia_with_dml, cpu]
     if OS == "Linux":
@@ -230,7 +269,7 @@ def graphical_accelerator():
         return options[0][1]

     # "I don't know" is always added the last option
-    options.append(idk)
+    options.append(autodetect)  # type: ignore

     options = {str(i): opt for i, opt in enumerate(options, 1)}

@@ -265,9 +304,9 @@ def graphical_accelerator():
         ),
     )

-    if options[choice][1] == "idk":
+    if options[choice][1] is GpuType.AUTODETECT:
         console.print(
-            "No problem. We will try to install a version that [i]should[/i] be compatible. :crossed_fingers:"
+            "No problem. We will install CUDA support first :crossed_fingers: If Invoke does not detect a GPU, please re-run the installer and select one of the other GPU types."
         )

     return options[choice][1]
@@ -291,7 +330,7 @@ def windows_long_paths_registry() -> None:
     """

     with open(str(Path(__file__).parent / "WinLongPathsEnabled.reg"), "r", encoding="utf-16le") as code:
-        syntax = Syntax(code.read(), line_numbers=True)
+        syntax = Syntax(code.read(), line_numbers=True, lexer="regedit")

     console.print(
         Panel(
@@ -301,7 +340,7 @@ def windows_long_paths_registry() -> None:
                     "We will now apply a registry fix to enable long paths on Windows. InvokeAI needs this to function correctly. We are asking your permission to modify the Windows Registry on your behalf.",
                     "",
                     "This is the change that will be applied:",
-                    syntax,
+                    str(syntax),
                 ]
             )
         ),
@@ -340,7 +379,7 @@ def introduction() -> None:
     console.line(2)


-def _platform_specific_help() -> str:
+def _platform_specific_help() -> Text | None:
     if OS == "Darwin":
         text = Text.from_markup(
             """[b wheat1]macOS Users![/]\n\nPlease be sure you have the [b wheat1]Xcode command-line tools[/] installed before continuing.\nIf not, cancel with [i]Control-C[/] and follow the Xcode install instructions at [deep_sky_blue1]https://www.freecodecamp.org/news/install-xcode-command-line-tools/[/]."""
@@ -354,5 +393,5 @@ def _platform_specific_help() -> str:
 [deep_sky_blue1]https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170[/]"""
         )
     else:
-        text = ""
+        return
     return text
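The GpuType enum introduced above is what select_gpu() now returns, and its .value strings are what get_torch_source() matches on. A minimal, self-contained sketch of that pattern (a trimmed copy of the enum, for illustration only):

from enum import Enum

class GpuType(Enum):
    CUDA = "cuda"
    ROCM = "rocm"
    CPU = "cpu"

device = GpuType.ROCM
assert device.value == "rocm"  # get_torch_source() branches on .value
assert device is GpuType.ROCM  # enum members also support identity checks ("is")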
@@ -4,7 +4,7 @@ Project homepage: https://github.com/invoke-ai/InvokeAI

 Preparations:

-  You will need to install Python 3.9 or higher for this installer
+  You will need to install Python 3.10 or higher for this installer
   to work. Instructions are given here:
   https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/

@@ -14,15 +14,15 @@ Preparations:
   python --version

   If all is well, it will print "Python 3.X.X", where the version number
-  is at least 3.9.*, and not higher than 3.11.*.
+  is at least 3.10.*, and not higher than 3.11.*.

   If this works, check the version of the Python package manager, pip:

   pip --version

   You should get a message that indicates that the pip package
-  installer was derived from Python 3.9 or 3.10. For example:
-  "pip 22.3.1 from /usr/bin/pip (python 3.9)"
+  installer was derived from Python 3.10 or 3.11. For example:
+  "pip 22.0.1 from /usr/bin/pip (python 3.10)"

 Long Paths on Windows:
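A quick self-check mirroring the README's supported window (at least 3.10, not higher than 3.11):

import sys

supported = (3, 10) <= sys.version_info[:2] <= (3, 11)
print(f"Python {sys.version.split()[0]} supported: {supported}")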
installer/tag_release.sh (new executable file, +71 lines)
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+set -e
+
+BCYAN="\e[1;36m"
+BYELLOW="\e[1;33m"
+BGREEN="\e[1;32m"
+BRED="\e[1;31m"
+RED="\e[31m"
+RESET="\e[0m"
+
+function does_tag_exist {
+    git rev-parse --quiet --verify "refs/tags/$1" >/dev/null
+}
+
+function git_show_ref {
+    git show-ref --dereference $1 --abbrev 7
+}
+
+function git_show {
+    git show -s --format='%h %s' $1
+}
+
+VERSION=$(
+    cd ..
+    python -c "from invokeai.version import __version__ as version; print(version)"
+)
+PATCH=""
+MAJOR_VERSION=$(echo $VERSION | sed 's/\..*$//')
+VERSION="v${VERSION}${PATCH}"
+LATEST_TAG="v${MAJOR_VERSION}-latest"
+
+if does_tag_exist $VERSION; then
+    echo -e "${BCYAN}${VERSION}${RESET} already exists:"
+    git_show_ref tags/$VERSION
+    echo
+fi
+if does_tag_exist $LATEST_TAG; then
+    echo -e "${BCYAN}${LATEST_TAG}${RESET} already exists:"
+    git_show_ref tags/$LATEST_TAG
+    echo
+fi
+
+echo -e "${BGREEN}HEAD${RESET}:"
+git_show
+echo
+
+echo -e -n "Create tags ${BCYAN}${VERSION}${RESET} and ${BCYAN}${LATEST_TAG}${RESET} @ ${BGREEN}HEAD${RESET}, ${RED}deleting existing tags on remote${RESET}? "
+read -e -p 'y/n [n]: ' input
+RESPONSE=${input:='n'}
+if [ "$RESPONSE" == 'y' ]; then
+    echo
+    echo -e "Deleting ${BCYAN}${VERSION}${RESET} tag on remote..."
+    git push --delete origin $VERSION
+
+    echo -e "Tagging ${BGREEN}HEAD${RESET} with ${BCYAN}${VERSION}${RESET} locally..."
+    if ! git tag -fa $VERSION; then
+        echo "Existing/invalid tag"
+        exit -1
+    fi
+
+    echo -e "Deleting ${BCYAN}${LATEST_TAG}${RESET} tag on remote..."
+    git push --delete origin $LATEST_TAG
+
+    echo -e "Tagging ${BGREEN}HEAD${RESET} with ${BCYAN}${LATEST_TAG}${RESET} locally..."
+    git tag -fa $LATEST_TAG
+
+    echo -e "Pushing updated tags to remote..."
+    git push origin --tags
+fi
+exit 0
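In tag_release.sh the sed expression strips everything from the first dot to derive the major version for the vN-latest tag. The same derivation in Python, with an illustrative version number:

version = "3.4.0"  # illustrative; the script reads this from invokeai.version
major_version = version.split(".", 1)[0]
latest_tag = f"v{major_version}-latest"
assert latest_tag == "v3-latest"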
@@ -9,41 +9,37 @@ set INVOKEAI_ROOT=.
 :start
 echo Desired action:
 echo 1. Generate images with the browser-based interface
-echo 2. Explore InvokeAI nodes using a command-line interface
-echo 3. Run textual inversion training
-echo 4. Merge models (diffusers type only)
-echo 5. Download and install models
-echo 6. Change InvokeAI startup options
-echo 7. Re-run the configure script to fix a broken install or to complete a major upgrade
-echo 8. Open the developer console
-echo 9. Update InvokeAI
-echo 10. Run the InvokeAI image database maintenance script
-echo 11. Command-line help
+echo 2. Run textual inversion training
+echo 3. Merge models (diffusers type only)
+echo 4. Download and install models
+echo 5. Change InvokeAI startup options
+echo 6. Re-run the configure script to fix a broken install or to complete a major upgrade
+echo 7. Open the developer console
+echo 8. Update InvokeAI (DEPRECATED - please use the installer)
+echo 9. Run the InvokeAI image database maintenance script
+echo 10. Command-line help
 echo Q - Quit
-set /P choice="Please enter 1-11, Q: [1] "
+set /P choice="Please enter 1-10, Q: [1] "
 if not defined choice set choice=1
 IF /I "%choice%" == "1" (
     echo Starting the InvokeAI browser-based UI..
     python .venv\Scripts\invokeai-web.exe %*
 ) ELSE IF /I "%choice%" == "2" (
-    echo Starting the InvokeAI command-line..
-    python .venv\Scripts\invokeai.exe %*
-) ELSE IF /I "%choice%" == "3" (
     echo Starting textual inversion training..
     python .venv\Scripts\invokeai-ti.exe --gui
-) ELSE IF /I "%choice%" == "4" (
+) ELSE IF /I "%choice%" == "3" (
     echo Starting model merging script..
     python .venv\Scripts\invokeai-merge.exe --gui
-) ELSE IF /I "%choice%" == "5" (
+) ELSE IF /I "%choice%" == "4" (
     echo Running invokeai-model-install...
     python .venv\Scripts\invokeai-model-install.exe
-) ELSE IF /I "%choice%" == "6" (
+) ELSE IF /I "%choice%" == "5" (
     echo Running invokeai-configure...
     python .venv\Scripts\invokeai-configure.exe --skip-sd-weight --skip-support-models
-) ELSE IF /I "%choice%" == "7" (
+) ELSE IF /I "%choice%" == "6" (
     echo Running invokeai-configure...
     python .venv\Scripts\invokeai-configure.exe --yes --skip-sd-weight
-) ELSE IF /I "%choice%" == "8" (
+) ELSE IF /I "%choice%" == "7" (
     echo Developer Console
     echo Python command is:
     where python
@@ -55,13 +51,15 @@ IF /I "%choice%" == "1" (
     echo *************************
     echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
     call cmd /k
-) ELSE IF /I "%choice%" == "9" (
-    echo Running invokeai-update...
+) ELSE IF /I "%choice%" == "8" (
+    echo UPDATING FROM WITHIN THE APP IS BEING DEPRECATED.
+    echo Please download the installer from https://github.com/invoke-ai/InvokeAI/releases/latest and run it to update your installation.
+    timeout 4
     python -m invokeai.frontend.install.invokeai_update
-) ELSE IF /I "%choice%" == "10" (
+) ELSE IF /I "%choice%" == "9" (
     echo Running the db maintenance script...
     python .venv\Scripts\invokeai-db-maintenance.exe
-) ELSE IF /I "%choice%" == "11" (
+) ELSE IF /I "%choice%" == "10" (
     echo Displaying command line help...
     python .venv\Scripts\invokeai-web.exe --help %*
     pause
@@ -81,4 +79,3 @@ pause
:ending
|
:ending
|
||||||
exit /b
|
exit /b
|
||||||
|
|
||||||
|
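The menu in invoke.bat rests on a standard cmd idiom: `set /P` leaves the variable untouched when the user presses Enter alone, so `if not defined choice set choice=1` supplies the default, and `IF /I` makes the Q/q comparison case-insensitive. A minimal standalone sketch of the pattern (a hypothetical demo.bat, not part of this diff):

@echo off
setlocal
set "choice="
set /P choice="Please enter 1-2, Q: [1] "
if not defined choice set choice=1
IF /I "%choice%" == "1" (
    echo option one
) ELSE IF /I "%choice%" == "2" (
    echo option two
) ELSE IF /I "%choice%" == "Q" (
    echo quitting
)

Clearing the variable up front matters because `set /P` on empty input would otherwise keep a value inherited from the parent environment, defeating the `if not defined` default.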