mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
commit 9d6d728b51

commit 1c649e4663 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Mon Sep 12 13:29:16 2022 -0400 | fix torchvision dependency version #511
commit 4d197f699e | Merge: a3e07fb 190ba78 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Mon Sep 12 07:29:19 2022 -0400 | Merge branch 'development' of github.com:lstein/stable-diffusion into development
commit a3e07fb84a | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Mon Sep 12 07:28:58 2022 -0400 | fix grid crash
commit 9fa1f31bf2 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Mon Sep 12 07:07:05 2022 -0400 | fix opencv and realesrgan dependencies in mac install
commit 190ba78960 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Mon Sep 12 01:50:58 2022 -0400 | Update requirements-mac.txt Fixed dangling dash on last line.
commit 25d9ccc509 | Author: Any-Winter-4079 <50542132+Any-Winter-4079@users.noreply.github.com> | Date: Mon Sep 12 03:17:29 2022 +0200 | Update model.py
commit 9cdf3aca7d | Author: Any-Winter-4079 <50542132+Any-Winter-4079@users.noreply.github.com> | Date: Mon Sep 12 02:52:36 2022 +0200 | Update attention.py Performance improvements to generate larger images in M1 #431 Update attention.py Added dtype=r1.dtype to softmax
commit 49a96b90d8 | Author: Mihai <299015+mh-dm@users.noreply.github.com> | Date: Sat Sep 10 16:58:07 2022 +0300 | ~7% speedup (1.57 to 1.69it/s) from switch to += in ldm.modules.attention. (#482) Tested on 8GB eGPU nvidia setup so YMMV. 512x512 output, max VRAM stays same.
commit aba94b85e8 | Author: Niek van der Maas <mail@niekvandermaas.nl> | Date: Fri Sep 9 15:01:37 2022 +0200 | Fix macOS `pyenv` instructions, add code block highlight (#441) Fix: `anaconda3-latest` does not work, specify the correct virtualenv, add missing init.
commit aac5102cf3 | Author: Henry van Megen <h.vanmegen@gmail.com> | Date: Thu Sep 8 05:16:35 2022 +0200 | Disabled debug output (#436) Co-authored-by: Henry van Megen <hvanmegen@gmail.com>
commit 0ab5a36464 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 17:19:46 2022 -0400 | fix missing lines in outputs
commit 5e433728b5 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 16:20:14 2022 -0400 | upped max_steps in v1-finetune.yaml and fixed TI docs to address #493
commit 7708f4fb98 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 16:03:37 2022 -0400 | slight efficiency gain by using += in attention.py
commit b86a1deb00 | Author: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> | Date: Mon Sep 12 07:47:12 2022 +1200 | Remove print statement styling (#504) Co-authored-by: Lincoln Stein <lincoln.stein@gmail.com>
commit 4951e66103 | Author: chromaticist <mhostick@gmail.com> | Date: Sun Sep 11 12:44:26 2022 -0700 | Adding support for .bin files from huggingface concepts (#498) * Adding support for .bin files from huggingface concepts * Updating documentation to include huggingface .bin info
commit 79b445b0ca | Merge: a323070 f7662c1 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 15:39:38 2022 -0400 | Merge branch 'development' of github.com:lstein/stable-diffusion into development
commit a323070a4d | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 15:28:57 2022 -0400 | update requirements for new location of gfpgan
commit f7662c1808 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 15:00:24 2022 -0400 | update requirements for changed location of gfpgan
commit 93c242c9fb | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 14:47:58 2022 -0400 | make gfpgan_model_exists flag available to web interface
commit c7c6cd7735 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 14:43:07 2022 -0400 | Update UPSCALE.md New instructions needed to accommodate fact that the ESRGAN and GFPGAN packages are now installed by environment.yaml.
commit 77ca83e103 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 14:31:56 2022 -0400 | Update CLI.md Final documentation tweak.
commit 0ea145d188 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 14:29:26 2022 -0400 | Update CLI.md More doc fixes.
commit 162285ae86 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 14:28:45 2022 -0400 | Update CLI.md Minor documentation fix
commit 37c921dfe2 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 14:26:41 2022 -0400 | documentation enhancements
commit 4f72cb44ad | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 13:05:38 2022 -0400 | moved the notebook files into their own directory
commit 878ef2e9e0 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 12:58:06 2022 -0400 | documentation tweaks
commit 4923118610 | Merge: 16f6a67 defafc0 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 12:51:25 2022 -0400 | Merge branch 'development' of github.com:lstein/stable-diffusion into development
commit defafc0e8e | Author: Dominic Letz <dominic@diode.io> | Date: Sun Sep 11 18:51:01 2022 +0200 | Enable upscaling on m1 (#474)
commit 16f6a6731d | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 12:47:26 2022 -0400 | install GFPGAN inside SD repository in order to fix 'dark cast' issue #169
commit 0881d429f2 | Author: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> | Date: Mon Sep 12 03:52:43 2022 +1200 | Docs Update (#466) Authored-by: @blessedcoolant Co-authored-by: Lincoln Stein <lincoln.stein@gmail.com>
commit 9a29d442b4 | Author: Gérald LONLAS <gerald@lonlas.com> | Date: Sun Sep 11 23:23:18 2022 +0800 | Revert "Add 3x Upscale option on the Web UI (#442)" (#488) This reverts commit f8a540881c.
commit d301836fbd | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 10:52:19 2022 -0400 | can select prior output for init_img using -1, -2, etc
commit 70aa674e9e | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 10:34:06 2022 -0400 | merge PR #495 - keep using float16 in ldm.modules.attention
commit 8748370f44 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 10:22:32 2022 -0400 | negative -S indexing recovers correct previous seed; closes issue #476
commit 839e30e4b8 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 11 10:02:44 2022 -0400 | improve CUDA VRAM monitoring extra check that device==cuda before getting VRAM stats
commit bfb2781279 | Author: tildebyte <337875+tildebyte@users.noreply.github.com> | Date: Sat Sep 10 10:15:56 2022 -0400 | fix(readme): add note about updating env via conda (#475)
commit 5c43988862 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sat Sep 10 10:02:43 2022 -0400 | reduce VRAM memory usage by half during model loading * This moves the call to half() before model.to(device) to avoid GPU copy of full model. Improves speed and reduces memory usage dramatically * This fix contributed by @mh-dm (Mihai)
commit 99122708ca | Merge: 817c4a2 ecc6b75 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sat Sep 10 09:54:34 2022 -0400 | Merge branch 'development' of github.com:lstein/stable-diffusion into development
commit 817c4a26de | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sat Sep 10 09:53:27 2022 -0400 | remove -F option from normalized prompt; closes #483
commit ecc6b75a3e | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sat Sep 10 09:53:27 2022 -0400 | remove -F option from normalized prompt
commit 723d074442 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Fri Sep 9 18:49:51 2022 -0400 | Allow ctrl c when using --from_file (#472) * added ansi escapes to highlight key parts of CLI session * adjust exception handling so that ^C will abort when reading prompts from a file
commit 75f633cda8 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Fri Sep 9 12:03:45 2022 -0400 | re-add new logo
commit 10db192cc4 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Fri Sep 9 09:26:10 2022 -0400 | changes to dogettx optimizations to run on m1 * Author @any-winter-4079 * Author @dogettx Thanks to many individuals who contributed time and hardware to benchmarking and debugging these changes.
commit c85ae00b33 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Thu Sep 8 23:57:45 2022 -0400 | fix bug which caused seed to get "stuck" on previous image even when UI specified -1
commit 1b5aae3ef3 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Thu Sep 8 22:36:47 2022 -0400 | add icon to dream web server
commit 6abf739315 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Thu Sep 8 22:25:09 2022 -0400 | add favicon to web server
commit db825b8138 | Merge: 33874ba afee7f9 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Thu Sep 8 22:17:37 2022 -0400 | Merge branch 'deNULL-development' into development
commit 33874bae8d | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Thu Sep 8 22:16:29 2022 -0400 | Squashed commit of the following:
commit afee7f9cea | Merge: 6531446 171f8db | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Thu Sep 8 22:14:32 2022 -0400 | Merge branch 'development' of github.com:deNULL/stable-diffusion into deNULL-development
commit 171f8db742 | Author: Denis Olshin <me@denull.ru> | Date: Thu Sep 8 03:15:20 2022 +0300 | saving full prompt to metadata when using web ui
commit d7e67b62f0 | Author: Denis Olshin <me@denull.ru> | Date: Thu Sep 8 01:51:47 2022 +0300 | better logic for clicking to make variations
commit afee7f9cea | Merge: 6531446 171f8db | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Thu Sep 8 22:14:32 2022 -0400 | Merge branch 'development' of github.com:deNULL/stable-diffusion into deNULL-development
commit 653144694f | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Thu Sep 8 20:41:37 2022 -0400 | work around unexplained crash when timesteps=1000 (#440) * work around unexplained crash when timesteps=1000 * this fix seems to work
commit c33a84cdfd | Author: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> | Date: Fri Sep 9 12:39:51 2022 +1200 | Add New Logo (#454) * Add instructions on how to install alongside pyenv (#393) Like probably many others, I have a lot of different virtualenvs, one for each project. Most of them are handled by `pyenv`. After installing according to these instructions I had issues with ´pyenv`and `miniconda` fighting over the $PATH of my system. But then I stumbled upon this nice solution on SO: https://stackoverflow.com/a/73139031 , upon which I have based my suggested changes. It runs perfectly on my M1 setup, with the anaconda setup as a virtual environment handled by pyenv. Feel free to incorporate these instructions as you see fit. Thanks a million for all your hard work. * Disabled debug output (#436) Co-authored-by: Henry van Megen <hvanmegen@gmail.com> * Add New Logo Co-authored-by: Håvard Gulldahl <havard@lurtgjort.no> Co-authored-by: Henry van Megen <h.vanmegen@gmail.com> Co-authored-by: Henry van Megen <hvanmegen@gmail.com> Co-authored-by: Lincoln Stein <lincoln.stein@gmail.com>
commit f8a540881c | Author: Gérald LONLAS <gerald@lonlas.com> | Date: Fri Sep 9 01:45:54 2022 +0800 | Add 3x Upscale option on the Web UI (#442)
commit 244239e5f6 | Author: James Reynolds <magnusviri@users.noreply.github.com> | Date: Thu Sep 8 05:36:33 2022 -0600 | macOS CI workflow, dream.py exits with an error, but the workflow com… (#396) * macOS CI workflow, dream.py exits with an error, but the workflow completes. * Files for testing Co-authored-by: James Reynolds <magnsuviri@me.com> Co-authored-by: Lincoln Stein <lincoln.stein@gmail.com>
commit 711d49ed30 | Author: James Reynolds <magnusviri@users.noreply.github.com> | Date: Thu Sep 8 05:35:08 2022 -0600 | Cache model workflow (#394) * Add workflow that caches the model, step 1 for CI * Change name of workflow job Co-authored-by: James Reynolds <magnsuviri@me.com> Co-authored-by: Lincoln Stein <lincoln.stein@gmail.com>
commit 7996a30e3a | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Thu Sep 8 07:34:03 2022 -0400 | add auto-creation of mask for inpainting (#438) * now use a single init image for both image and mask * turn on debugging for now to write out mask and image * add back -M option as a fallback
commit a69ca31f34 | Author: elliotsayes <elliotsayes@gmail.com> | Date: Thu Sep 8 15:30:06 2022 +1200 | .gitignore WebUI temp files (#430) * Add instructions on how to install alongside pyenv (#393) Like probably many others, I have a lot of different virtualenvs, one for each project. Most of them are handled by `pyenv`. After installing according to these instructions I had issues with ´pyenv`and `miniconda` fighting over the $PATH of my system. But then I stumbled upon this nice solution on SO: https://stackoverflow.com/a/73139031 , upon which I have based my suggested changes. It runs perfectly on my M1 setup, with the anaconda setup as a virtual environment handled by pyenv. Feel free to incorporate these instructions as you see fit. Thanks a million for all your hard work. * .gitignore WebUI temp files Co-authored-by: Håvard Gulldahl <havard@lurtgjort.no>
commit 5c6b612a72 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Wed Sep 7 22:50:55 2022 -0400 | fix bug that caused same seed to be redisplayed repeatedly
commit 56f155c590 | Author: Johan Roxendal <johan@roxendal.com> | Date: Thu Sep 8 04:50:06 2022 +0200 | added support for parsing run log and displaying images in the frontend init state (#410) Co-authored-by: Johan Roxendal <johan.roxendal@litteraturbanken.se> Co-authored-by: Lincoln Stein <lincoln.stein@gmail.com>
commit 41687746be | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Wed Sep 7 20:24:35 2022 -0400 | added missing initialization of latent_noise to None
commit 171f8db742 | Author: Denis Olshin <me@denull.ru> | Date: Thu Sep 8 03:15:20 2022 +0300 | saving full prompt to metadata when using web ui
commit d7e67b62f0 | Author: Denis Olshin <me@denull.ru> | Date: Thu Sep 8 01:51:47 2022 +0300 | better logic for clicking to make variations
commit d1d044aa87 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Wed Sep 7 17:56:59 2022 -0400 | actual image seed now written into web log rather than -1 (#428)
commit edada042b3 | Author: Arturo Mendivil <60411196+artmen1516@users.noreply.github.com> | Date: Wed Sep 7 10:42:26 2022 -0700 | Improve notebook and add requirements file (#422)
commit 29ab3c2028 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Wed Sep 7 13:28:11 2022 -0400 | disable neonpixel optimizations on M1 hardware (#414) * disable neonpixel optimizations on M1 hardware * fix typo that was causing random noise images on m1
commit 7670ecc63f | Author: cody <cnmizell@gmail.com> | Date: Wed Sep 7 12:24:41 2022 -0500 | add more keyboard support on the web server (#391) add ability to submit prompts with the "enter" key add ability to cancel generations with the "escape" key
commit dd2aedacaf | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Wed Sep 7 13:23:53 2022 -0400 | report VRAM usage stats during initial model loading (#419)
commit f6284777e6 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Tue Sep 6 17:12:39 2022 -0400 | Squashed commit of the following: commit 7d1344282d942a33dcecda4d5144fc154ec82915 Merge: caf4ea3 ebeb556 Author: Lincoln Stein <lincoln.stein@gmail.com> Date: Mon Sep 5 10:07:27 2022 -0400 Merge branch 'development' of github.com:WebDev9000/stable-diffusion into WebDev9000-development
commit ebeb556af9 | Author: Web Dev 9000 <rirath@gmail.com> | Date: Sun Sep 4 18:05:15 2022 -0700 | Fixed unintentionally removed lines
commit ff2c4b9a1b | Author: Web Dev 9000 <rirath@gmail.com> | Date: Sun Sep 4 17:50:13 2022 -0700 | Add ability to recreate variations via image click
commit c012929cda | Author: Web Dev 9000 <rirath@gmail.com> | Date: Sun Sep 4 14:35:33 2022 -0700 | Add files via upload
commit 02a6018992 | Author: Web Dev 9000 <rirath@gmail.com> | Date: Sun Sep 4 14:35:07 2022 -0700 | Add files via upload
commit eef788981c | Author: Olivier Louvignes <olivier@mg-crea.com> | Date: Tue Sep 6 12:41:08 2022 +0200 | feat(txt2img): allow from_file to work with len(lines) < batch_size (#349)
commit 720e5cd651 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Mon Sep 5 20:40:10 2022 -0400 | Refactoring simplet2i (#387) * start refactoring -not yet functional * first phase of refactor done - not sure weighted prompts working * Second phase of refactoring. Everything mostly working. * The refactoring has moved all the hard-core inference work into ldm.dream.generator.*, where there are submodules for txt2img and img2img. inpaint will go in there as well. * Some additional refactoring will be done soon, but relatively minor work. * fix -save_orig flag to actually work * add @neonsecret attention.py memory optimization * remove unneeded imports * move token logging into conditioning.py * add placeholder version of inpaint; porting in progress * fix crash in img2img * inpainting working; not tested on variations * fix crashes in img2img * ported attention.py memory optimization #117 from basujindal branch * added @torch_no_grad() decorators to img2img, txt2img, inpaint closures * Final commit prior to PR against development * fixup crash when generating intermediate images in web UI * rename ldm.simplet2i to ldm.generate * add backward-compatibility simplet2i shell with deprecation warning * add back in mps exception, addresses @vargol comment in #354 * replaced Conditioning class with exported functions * fix wrong type of with_variations attribute during intialization * changed "image_iterator()" to "get_make_image()" * raise NotImplementedError for calling get_make_image() in parent class * Update ldm/generate.py better error message Co-authored-by: Kevin Gibbons <bakkot@gmail.com> * minor stylistic fixes and assertion checks from code review * moved get_noise() method into img2img class * break get_noise() into two methods, one for txt2img and the other for img2img * inpainting works on non-square images now * make get_noise() an abstract method in base class * much improved inpainting Co-authored-by: Kevin Gibbons <bakkot@gmail.com>
commit 1ad2a8e567 | Author: thealanle <35761977+thealanle@users.noreply.github.com> | Date: Mon Sep 5 17:35:04 2022 -0700 | Fix --outdir function for web (#373) * Fix --outdir function for web * Removed unnecessary hardcoded path
commit 52d8bb2836 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Mon Sep 5 10:31:59 2022 -0400 | Squashed commit of the following: commit 0cd48e932f1326e000c46f4140f98697eb9bdc79 Author: Lincoln Stein <lincoln.stein@gmail.com> Date: Mon Sep 5 10:27:43 2022 -0400 resolve conflicts with development
commit d7bc8c12e0 | Author: Scott McMillin <scott@scottmcmillin.com> | Date: Sun Sep 4 18:52:09 2022 -0500 | Add title attribute back to img tag
commit 5397c89184 | Author: Scott McMillin <scott@scottmcmillin.com> | Date: Sun Sep 4 13:49:46 2022 -0500 | Remove temp code
commit 1da080b509 | Author: Scott McMillin <scott@scottmcmillin.com> | Date: Sun Sep 4 13:33:56 2022 -0500 | Cleaned up HTML; small style changes; image click opens image; add seed to figcaption beneath image
commit caf4ea3d89 | Author: Adam Rice <adam@askadam.io> | Date: Mon Sep 5 10:05:39 2022 -0400 | Add a 'Remove Image' button to clear the file upload field (#382) * added "remove image" button * styled a new "remove image" button * Update index.js
commit 95c088b303 | Author: Kevin Gibbons <bakkot@gmail.com> | Date: Sun Sep 4 19:04:14 2022 -0700 | Revert "Add CORS headers to dream server to ease integration with third-party web interfaces" (#371) This reverts commit 91e826e5f4.
commit a20113d5a3 | Author: Kevin Gibbons <bakkot@gmail.com> | Date: Sun Sep 4 18:59:12 2022 -0700 | put no_grad decorator on make_image closures (#375)
commit 0f93dadd6a | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 4 21:39:15 2022 -0400 | fix several dangling references to --gfpgan option, which no longer exists
commit f4004f660e | Author: tildebyte <337875+tildebyte@users.noreply.github.com> | Date: Sun Sep 4 19:43:04 2022 -0400 | TOIL(requirements): Split requirements to per-platform (#355) * toil(reqs): split requirements to per-platform Signed-off-by: Ben Alkov <ben.alkov@gmail.com> * toil(reqs): fix for Win and Lin... ...allow pip to resolve latest torch, numpy Signed-off-by: Ben Alkov <ben.alkov@gmail.com> * toil(install): update reqs in Win install notebook Signed-off-by: Ben Alkov <ben.alkov@gmail.com> Signed-off-by: Ben Alkov <ben.alkov@gmail.com>
commit 4406fd138d | Merge: 5116c81 fd7a72e | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 4 08:23:53 2022 -0400 | Merge branch 'SebastianAigner-main' into development Add support for full CORS headers for dream server.
commit fd7a72e147 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 4 08:23:11 2022 -0400 | remove debugging message
commit 3a2be621f3 | Merge: 91e826e 5116c81 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sun Sep 4 08:15:51 2022 -0400 | Merge branch 'development' into main
commit 5116c8178c | Author: Justin Wong <1584142+wongjustin99@users.noreply.github.com> | Date: Sun Sep 4 07:17:58 2022 -0400 | fix save_original flag saving to the same filename (#360) * Update README.md with new Anaconda install steps (#347) pip3 version did not work for me and this is the recommended way to install Anaconda now it seems * fix save_original flag saving to the same filename Before this, the `--save_orig` flag was not working. The upscaled/GFPGAN would overwrite the original output image. Co-authored-by: greentext2 <112735219+greentext2@users.noreply.github.com>
commit 91e826e5f4 | Author: Sebastian Aigner <SebastianAigner@users.noreply.github.com> | Date: Sun Sep 4 10:22:54 2022 +0200 | Add CORS headers to dream server to ease integration with third-party web interfaces
commit 6266d9e8d6 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sat Sep 3 15:45:20 2022 -0400 | remove stray debugging message
commit 138956e516 | Author: greentext2 <112735219+greentext2@users.noreply.github.com> | Date: Sat Sep 3 13:38:57 2022 -0500 | Update README.md with new Anaconda install steps (#347) pip3 version did not work for me and this is the recommended way to install Anaconda now it seems
commit 60be735e80 | Author: Cora Johnson-Roberson <cora.johnson.roberson@gmail.com> | Date: Sat Sep 3 14:28:34 2022 -0400 | Switch to regular pytorch channel and restore Python 3.10 for Macs. (#301) * Switch to regular pytorch channel and restore Python 3.10 for Macs. Although pytorch-nightly should in theory be faster, it is currently causing increased memory usage and slower iterations: https://github.com/lstein/stable-diffusion/pull/283#issuecomment-1234784885 This changes the environment-mac.yaml file back to the regular pytorch channel and moves the `transformers` dep into pip for now (since it cannot be satisfied until tokenizers>=0.11 is built for Python 3.10). * Specify versions for Pip packages as well.
commit d0d95d3a2a | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sat Sep 3 14:10:31 2022 -0400 | make initimg appear in web log
commit b90a215000 | Merge: 1eee811 6270e31 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sat Sep 3 13:47:15 2022 -0400 | Merge branch 'prixt-seamless' into development
commit 6270e313b8 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sat Sep 3 13:46:29 2022 -0400 | add credit to prixt for seamless circular tiling
commit a01b7bdc40 | Merge: 1eee811 9d88abe | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sat Sep 3 13:43:04 2022 -0400 | add web interface for seamless option
commit 1eee8111b9 | Merge: 64eca42 fb857f0 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sat Sep 3 12:33:39 2022 -0400 | Merge branch 'development' of github.com:lstein/stable-diffusion into development
commit 64eca42610 | Merge: 9130ad7 21a1f68 | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sat Sep 3 12:33:05 2022 -0400 | Merge branch 'main' into development * brings in small documentation fixes that were added directly to main during release tweaking.
commit fb857f05ba | Author: Lincoln Stein <lincoln.stein@gmail.com> | Date: Sat Sep 3 12:07:07 2022 -0400 | fix typo in docs
commit 9d88abe2ea | Author: prixt <paraxite@naver.com> | Date: Sat Sep 3 22:42:16 2022 +0900 | fixed typo
commit a61e49bc97 | Author: prixt <paraxite@naver.com> | Date: Sat Sep 3 22:39:35 2022 +0900 | * Removed unnecessary code * Added description about --seamless
commit 02bee4fdb1 | Author: prixt <paraxite@naver.com> | Date: Sat Sep 3 16:08:03 2022 +0900 | added --seamless tag logging to normalize_prompt
commit d922b53c26 | Author: prixt <paraxite@naver.com> | Date: Sat Sep 3 15:13:31 2022 +0900 | added seamless tiling mode and commands
273 lines · 8.8 KiB · Python
from cmath import log
import torch
from torch import nn

import sys

from ldm.data.personalized import per_img_token_list
from transformers import CLIPTokenizer
from functools import partial

DEFAULT_PLACEHOLDER_TOKEN = ['*']

PROGRESSIVE_SCALE = 2000


def get_clip_token_for_string(tokenizer, string):
    batch_encoding = tokenizer(
        string,
        truncation=True,
        max_length=77,
        return_length=True,
        return_overflowing_tokens=False,
        padding='max_length',
        return_tensors='pt',
    )
    tokens = batch_encoding['input_ids']
    """ assert (
        torch.count_nonzero(tokens - 49407) == 2
    ), f"String '{string}' maps to more than a single token. Please use another string" """

    return tokens[0, 1]


def get_bert_token_for_string(tokenizer, string):
    token = tokenizer(string)
    # assert torch.count_nonzero(token) == 3, f"String '{string}' maps to more than a single token. Please use another string"

    token = token[0, 1]

    return token


def get_embedding_for_clip_token(embedder, token):
    return embedder(token.unsqueeze(0))[0, 0]


class EmbeddingManager(nn.Module):
    def __init__(
        self,
        embedder,
        placeholder_strings=None,
        initializer_words=None,
        per_image_tokens=False,
        num_vectors_per_token=1,
        progressive_words=False,
        **kwargs,
    ):
        super().__init__()

        self.embedder = embedder

        self.string_to_token_dict = {}
        self.string_to_param_dict = nn.ParameterDict()

        self.initial_embeddings = (
            nn.ParameterDict()
        ) # These should not be optimized

        self.progressive_words = progressive_words
        self.progressive_counter = 0

        self.max_vectors_per_token = num_vectors_per_token

        if hasattr(
            embedder, 'tokenizer'
        ): # using Stable Diffusion's CLIP encoder
            self.is_clip = True
            get_token_for_string = partial(
                get_clip_token_for_string, embedder.tokenizer
            )
            get_embedding_for_tkn = partial(
                get_embedding_for_clip_token,
                embedder.transformer.text_model.embeddings,
            )
            token_dim = 1280
        else: # using LDM's BERT encoder
            self.is_clip = False
            get_token_for_string = partial(
                get_bert_token_for_string, embedder.tknz_fn
            )
            get_embedding_for_tkn = embedder.transformer.token_emb
            token_dim = 1280

        if per_image_tokens:
            placeholder_strings.extend(per_img_token_list)

        for idx, placeholder_string in enumerate(placeholder_strings):

            token = get_token_for_string(placeholder_string)

            if initializer_words and idx < len(initializer_words):
                init_word_token = get_token_for_string(initializer_words[idx])

                with torch.no_grad():
                    init_word_embedding = get_embedding_for_tkn(
                        init_word_token.cpu()
                    )

                token_params = torch.nn.Parameter(
                    init_word_embedding.unsqueeze(0).repeat(
                        num_vectors_per_token, 1
                    ),
                    requires_grad=True,
                )
                self.initial_embeddings[
                    placeholder_string
                ] = torch.nn.Parameter(
                    init_word_embedding.unsqueeze(0).repeat(
                        num_vectors_per_token, 1
                    ),
                    requires_grad=False,
                )
            else:
                token_params = torch.nn.Parameter(
                    torch.rand(
                        size=(num_vectors_per_token, token_dim),
                        requires_grad=True,
                    )
                )

            self.string_to_token_dict[placeholder_string] = token
            self.string_to_param_dict[placeholder_string] = token_params

    def forward(
        self,
        tokenized_text,
        embedded_text,
    ):
        b, n, device = *tokenized_text.shape, tokenized_text.device

        for (
            placeholder_string,
            placeholder_token,
        ) in self.string_to_token_dict.items():

            placeholder_embedding = self.string_to_param_dict[
                placeholder_string
            ].to(device)

            if (
                self.max_vectors_per_token == 1
            ): # If there's only one vector per token, we can do a simple replacement
                placeholder_idx = torch.where(
                    tokenized_text == placeholder_token.to(device)
                )
                embedded_text[placeholder_idx] = placeholder_embedding
            else: # otherwise, need to insert and keep track of changing indices
                if self.progressive_words:
                    self.progressive_counter += 1
                    max_step_tokens = (
                        1 + self.progressive_counter // PROGRESSIVE_SCALE
                    )
                else:
                    max_step_tokens = self.max_vectors_per_token

                num_vectors_for_token = min(
                    placeholder_embedding.shape[0], max_step_tokens
                )

                placeholder_rows, placeholder_cols = torch.where(
                    tokenized_text == placeholder_token.to(device)
                )

                if placeholder_rows.nelement() == 0:
                    continue

                sorted_cols, sort_idx = torch.sort(
                    placeholder_cols, descending=True
                )
                sorted_rows = placeholder_rows[sort_idx]

                for idx in range(len(sorted_rows)):
                    row = sorted_rows[idx]
                    col = sorted_cols[idx]

                    new_token_row = torch.cat(
                        [
                            tokenized_text[row][:col],
                            placeholder_token.repeat(num_vectors_for_token).to(
                                device
                            ),
                            tokenized_text[row][col + 1 :],
                        ],
                        axis=0,
                    )[:n]
                    new_embed_row = torch.cat(
                        [
                            embedded_text[row][:col],
                            placeholder_embedding[:num_vectors_for_token],
                            embedded_text[row][col + 1 :],
                        ],
                        axis=0,
                    )[:n]

                    embedded_text[row] = new_embed_row
                    tokenized_text[row] = new_token_row

        return embedded_text

    def save(self, ckpt_path):
        torch.save(
            {
                'string_to_token': self.string_to_token_dict,
                'string_to_param': self.string_to_param_dict,
            },
            ckpt_path,
        )

    def load(self, ckpt_path, full=True):
        ckpt = torch.load(ckpt_path, map_location='cpu')

        # Handle .pt textual inversion files
        if 'string_to_token' in ckpt and 'string_to_param' in ckpt:
            self.string_to_token_dict = ckpt["string_to_token"]
            self.string_to_param_dict = ckpt["string_to_param"]

        # Handle .bin textual inversion files from Huggingface Concepts
        # https://huggingface.co/sd-concepts-library
        else:
            for token_str in list(ckpt.keys()):
                token = get_clip_token_for_string(self.embedder.tokenizer, token_str)
                self.string_to_token_dict[token_str] = token
                ckpt[token_str] = torch.nn.Parameter(ckpt[token_str])

            self.string_to_param_dict.update(ckpt)

        if not full:
            for key, value in self.string_to_param_dict.items():
                self.string_to_param_dict[key] = torch.nn.Parameter(value.half())

        print(f'Added terms: {", ".join(self.string_to_param_dict.keys())}')

    def get_embedding_norms_squared(self):
        all_params = torch.cat(
            list(self.string_to_param_dict.values()), axis=0
        ) # num_placeholders x embedding_dim
        param_norm_squared = (all_params * all_params).sum(
            axis=-1
        ) # num_placeholders

        return param_norm_squared

    def embedding_parameters(self):
        return self.string_to_param_dict.parameters()

    def embedding_to_coarse_loss(self):

        loss = 0.0
        num_embeddings = len(self.initial_embeddings)

        for key in self.initial_embeddings:
            optimized = self.string_to_param_dict[key]
            coarse = self.initial_embeddings[key].clone().to(optimized.device)

            loss = (
                loss
                + (optimized - coarse)
                @ (optimized - coarse).T
                / num_embeddings
            )

        return loss
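For orientation, here is a minimal usage sketch (not part of the file above) of the save()/load() round trip for the ".pt" checkpoint format that load() handles; the ".bin" branch additionally re-tokenizes each key with the CLIP tokenizer, as the comments in load() indicate. The _StubTokenizer and _StubEmbedder classes below are illustrative stand-ins, not InvokeAI classes: they satisfy only the attributes EmbeddingManager touches on the BERT code path (tknz_fn and transformer.token_emb). The sketch also assumes a PyTorch release where torch.load defaults to full unpickling (the 2022-era behavior this module was written against).

import os
import tempfile

import torch
from torch import nn


class _StubTokenizer:
    # Stand-in for the LDM BERT tokenizer interface (tknz_fn): callable and
    # returning a 2-D tensor of token ids, so that token[0, 1] is a single id.
    def __call__(self, string):
        return torch.tensor([[101, 500, 102]])


class _StubEmbedder(nn.Module):
    # Exposes only what EmbeddingManager needs on the BERT code path:
    # no `tokenizer` attribute, but `tknz_fn` and `transformer.token_emb`.
    def __init__(self):
        super().__init__()
        self.tknz_fn = _StubTokenizer()
        self.transformer = nn.Module()
        self.transformer.token_emb = nn.Embedding(1000, 1280)


manager = EmbeddingManager(_StubEmbedder(), placeholder_strings=['*'])

with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, 'learned_embeds.pt')
    manager.save(path)  # writes {'string_to_token': ..., 'string_to_param': ...}

    restored = EmbeddingManager(_StubEmbedder(), placeholder_strings=['*'])
    restored.load(path)  # takes the ".pt" branch and prints "Added terms: *"
    print(restored.string_to_param_dict['*'].shape)  # torch.Size([1, 1280])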