From dbc8fc79008795875eb22ebf0c57927061af86bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?H=C3=A5vard=20Gulldahl?= Date: Tue, 6 Sep 2022 13:07:10 +0200 Subject: [PATCH 1/6] Add instructions on how to install alongside pyenv (#393) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Like probably many others, I have a lot of different virtualenvs, one for each project. Most of them are handled by `pyenv`. After installing according to these instructions I had issues with `pyenv` and `miniconda` fighting over the $PATH of my system. But then I stumbled upon this nice solution on SO: https://stackoverflow.com/a/73139031, upon which I have based my suggested changes. It runs perfectly on my M1 setup, with the anaconda setup as a virtual environment handled by pyenv. Feel free to incorporate these instructions as you see fit. Thanks a million for all your hard work. --- README-Mac-MPS.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/README-Mac-MPS.md b/README-Mac-MPS.md index 04d513cf8c..ef103d6b45 100644 --- a/README-Mac-MPS.md +++ b/README-Mac-MPS.md @@ -32,6 +32,23 @@ While that is downloading, open Terminal and run the following commands one at a # install brew (and Xcode command line tools): /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +# +# Now there are two different routes to get the Python (miniconda) environment up and running: +# 1. Alongside pyenv +# 2. No pyenv +# +# If you don't know what we are talking about, choose 2. +# +# NOW EITHER DO +# 1. Installing alongside pyenv + +brew install pyenv-virtualenv # you might have this from before, no problem +pyenv install anaconda3-latest +pyenv virtualenv anaconda3-latest lstein-stable-diffusion +pyenv activate lstein-stable-diffusion + +# OR, +# 2. 
Installing standalone # install python 3, git, cmake, protobuf: brew install cmake protobuf rust @@ -39,6 +56,10 @@ brew install cmake protobuf rust curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh -o Miniconda3-latest-MacOSX-arm64.sh /bin/bash Miniconda3-latest-MacOSX-arm64.sh + +# EITHER WAY, +# continue from here + # clone the repo git clone https://github.com/lstein/stable-diffusion.git cd stable-diffusion From 049ea02fc7e2e0e77908e08455e32f2028e232e9 Mon Sep 17 00:00:00 2001 From: Henry van Megen Date: Thu, 8 Sep 2022 05:16:35 +0200 Subject: [PATCH 2/6] Disabled debug output (#436) Co-authored-by: Henry van Megen --- ldm/simplet2i.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py index 3b4cf6c54d..c67cf0052c 100644 --- a/ldm/simplet2i.py +++ b/ldm/simplet2i.py @@ -324,7 +324,7 @@ class T2I: self.model.encode_first_stage(init_image) ) # move to latent space - print(f' DEBUG: seed at make_image time ={seed}') + #print(f' DEBUG: seed at make_image time ={seed}') make_image = self._img2img( prompt, steps=steps, @@ -378,7 +378,7 @@ class T2I: if self.device.type == 'mps': x_T = self._get_noise(init_latent,width,height) # make_image will do the equivalent of get_noise itself - print(f' DEBUG: seed at make_image() invocation time ={seed}') + #print(f' DEBUG: seed at make_image() invocation time ={seed}') image = make_image(x_T) results.append([image, seed]) if image_callback is not None: From 79ac0f34201b69c4e8d44ef476a6d96a7844c900 Mon Sep 17 00:00:00 2001 From: Niek van der Maas Date: Fri, 9 Sep 2022 15:01:37 +0200 Subject: [PATCH 3/6] Fix macOS `pyenv` instructions, add code block highlight (#441) Fix: `anaconda3-latest` does not work, specify the correct virtualenv, add missing init. --- README-Mac-MPS.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/README-Mac-MPS.md b/README-Mac-MPS.md index ef103d6b45..99d3181f2e 100644 --- a/README-Mac-MPS.md +++ b/README-Mac-MPS.md @@ -28,7 +28,7 @@ First get the weights checkpoint download started - it's big: While that is downloading, open Terminal and run the following commands one at a time. -``` +```bash # install brew (and Xcode command line tools): /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" @@ -43,9 +43,10 @@ While that is downloading, open Terminal and run the following commands one at a # 1. Installing alongside pyenv brew install pyenv-virtualenv # you might have this from before, no problem -pyenv install anaconda3-latest -pyenv virtualenv anaconda3-latest lstein-stable-diffusion -pyenv activate lstein-stable-diffusion +pyenv install anaconda3-2022.05 +pyenv virtualenv anaconda3-2022.05 +eval "$(pyenv init -)" +pyenv activate anaconda3-2022.05 # OR, # 2. Installing standalone From 62863ac586194a43ff952eba17a83cecf9956500 Mon Sep 17 00:00:00 2001 From: Mihai <299015+mh-dm@users.noreply.github.com> Date: Sat, 10 Sep 2022 16:58:07 +0300 Subject: [PATCH 4/6] ~7% speedup (1.57 to 1.69it/s) from switch to += in ldm.modules.attention. (#482) Tested on 8GB eGPU nvidia setup so YMMV. 512x512 output, max VRAM stays same. 
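The change is easy to see in isolation: rewriting each residual connection as an in-place `+=` writes the sum into the existing activation tensor instead of allocating a fresh one for every `result + x`. A minimal PyTorch sketch of the idiom (not the repository's `BasicTransformerBlock`, just the residual pattern, run under `torch.no_grad()` where in-place updates are safe):

```python
import torch
import torch.nn as nn

class TinyResidualBlock(nn.Module):
    """Toy block contrasting out-of-place and in-place residual updates."""

    def __init__(self, dim: int = 64):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.ff = nn.Linear(dim, dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Out-of-place: x = self.ff(self.norm(x)) + x  -> allocates a new tensor for the sum
        # In-place:     x += self.ff(self.norm(x))     -> reuses x's storage, same values
        x += self.ff(self.norm(x))
        return x

with torch.no_grad():
    out = TinyResidualBlock()(torch.randn(2, 16, 64))
print(out.shape)  # torch.Size([2, 16, 64])
```

Only the temporary sum tensor is saved, which is consistent with a modest throughput gain and unchanged peak VRAM.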
--- ldm/modules/attention.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ldm/modules/attention.py b/ldm/modules/attention.py index 817e9bcdc4..24aef29279 100644 --- a/ldm/modules/attention.py +++ b/ldm/modules/attention.py @@ -235,9 +235,9 @@ class BasicTransformerBlock(nn.Module): def _forward(self, x, context=None): x = x.contiguous() if x.device.type == 'mps' else x - x = self.attn1(self.norm1(x)) + x - x = self.attn2(self.norm2(x), context=context) + x - x = self.ff(self.norm3(x)) + x + x += self.attn1(self.norm1(x)) + x += self.attn2(self.norm2(x), context=context) + x += self.ff(self.norm3(x)) return x From 9d6d728b511de8f0db48ea79b7a4e00838702ff0 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Mon, 12 Sep 2022 14:31:48 -0400 Subject: [PATCH 5/6] Squashed commit of the following: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 1c649e4663f37b51b42a561548c7e03d7efb209e Author: Lincoln Stein Date: Mon Sep 12 13:29:16 2022 -0400 fix torchvision dependency version #511 commit 4d197f699e1e8c3b0e7c1b71c30261a49370ee8d Merge: a3e07fb 190ba78 Author: Lincoln Stein Date: Mon Sep 12 07:29:19 2022 -0400 Merge branch 'development' of github.com:lstein/stable-diffusion into development commit a3e07fb84ad51eab2aa586edaa011bbd4e01b395 Author: Lincoln Stein Date: Mon Sep 12 07:28:58 2022 -0400 fix grid crash commit 9fa1f31bf2f80785492927959c58e4b0825fb2e4 Author: Lincoln Stein Date: Mon Sep 12 07:07:05 2022 -0400 fix opencv and realesrgan dependencies in mac install commit 190ba78960c0c45bd1c51626e303b8c78a17b0c1 Author: Lincoln Stein Date: Mon Sep 12 01:50:58 2022 -0400 Update requirements-mac.txt Fixed dangling dash on last line. commit 25d9ccc5091cc6452d8597453dcfe6c79327aa3a Author: Any-Winter-4079 <50542132+Any-Winter-4079@users.noreply.github.com> Date: Mon Sep 12 03:17:29 2022 +0200 Update model.py commit 9cdf3aca7d2a7a6e85ec0a2732eb8e5a2dd60329 Author: Any-Winter-4079 <50542132+Any-Winter-4079@users.noreply.github.com> Date: Mon Sep 12 02:52:36 2022 +0200 Update attention.py Performance improvements to generate larger images in M1 #431 Update attention.py Added dtype=r1.dtype to softmax commit 49a96b90d846bcff17582273cacad596eff30658 Author: Mihai <299015+mh-dm@users.noreply.github.com> Date: Sat Sep 10 16:58:07 2022 +0300 ~7% speedup (1.57 to 1.69it/s) from switch to += in ldm.modules.attention. (#482) Tested on 8GB eGPU nvidia setup so YMMV. 512x512 output, max VRAM stays same. commit aba94b85e88cde654dd03bdec493a6d3b232f931 Author: Niek van der Maas Date: Fri Sep 9 15:01:37 2022 +0200 Fix macOS `pyenv` instructions, add code block highlight (#441) Fix: `anaconda3-latest` does not work, specify the correct virtualenv, add missing init. 
commit aac5102cf3850781a635cacc3150dd6bb4f486a8 Author: Henry van Megen Date: Thu Sep 8 05:16:35 2022 +0200 Disabled debug output (#436) Co-authored-by: Henry van Megen commit 0ab5a3646424467b459ea878d49cfc23f4a5ea35 Author: Lincoln Stein Date: Sun Sep 11 17:19:46 2022 -0400 fix missing lines in outputs commit 5e433728b550de9f56a2f124c8b325b3a5f2bd2f Author: Lincoln Stein Date: Sun Sep 11 16:20:14 2022 -0400 upped max_steps in v1-finetune.yaml and fixed TI docs to address #493 commit 7708f4fb98510dff504041231261c039a2c718de Author: Lincoln Stein Date: Sun Sep 11 16:03:37 2022 -0400 slight efficiency gain by using += in attention.py commit b86a1deb00892f2b5f260659377d27790ef14016 Author: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Mon Sep 12 07:47:12 2022 +1200 Remove print statement styling (#504) Co-authored-by: Lincoln Stein commit 4951e66103878e5d5c8943a710ebce9320888252 Author: chromaticist Date: Sun Sep 11 12:44:26 2022 -0700 Adding support for .bin files from huggingface concepts (#498) * Adding support for .bin files from huggingface concepts * Updating documentation to include huggingface .bin info commit 79b445b0ca43b3592a829909dc4507cb1ecbe9e0 Merge: a323070 f7662c1 Author: Lincoln Stein Date: Sun Sep 11 15:39:38 2022 -0400 Merge branch 'development' of github.com:lstein/stable-diffusion into development commit a323070a4dbb1ce62db94342a2ab8e4adef833d6 Author: Lincoln Stein Date: Sun Sep 11 15:28:57 2022 -0400 update requirements for new location of gfpgan commit f7662c1808acc1704316d3b84d4baeacf1b24018 Author: Lincoln Stein Date: Sun Sep 11 15:00:24 2022 -0400 update requirements for changed location of gfpgan commit 93c242c9fbef91d87a6bbf42db2267dbd51e5739 Author: Lincoln Stein Date: Sun Sep 11 14:47:58 2022 -0400 make gfpgan_model_exists flag available to web interface commit c7c6cd7735b5c32e58349ca998a925cbaed7b376 Author: Lincoln Stein Date: Sun Sep 11 14:43:07 2022 -0400 Update UPSCALE.md New instructions needed to accommodate fact that the ESRGAN and GFPGAN packages are now installed by environment.yaml. commit 77ca83e1031639f1e15cb7451e53dd8e37d1e971 Author: Lincoln Stein Date: Sun Sep 11 14:31:56 2022 -0400 Update CLI.md Final documentation tweak. commit 0ea145d1884ce2316452124fd51a879506e2988d Author: Lincoln Stein Date: Sun Sep 11 14:29:26 2022 -0400 Update CLI.md More doc fixes. 
commit 162285ae86a2ab0bb26749387186c82b6bbf851d Author: Lincoln Stein Date: Sun Sep 11 14:28:45 2022 -0400 Update CLI.md Minor documentation fix commit 37c921dfe2aa25342934a101bf83eea4c0f5cfb7 Author: Lincoln Stein Date: Sun Sep 11 14:26:41 2022 -0400 documentation enhancements commit 4f72cb44ad0429874c9ba507d325267e295a040c Author: Lincoln Stein Date: Sun Sep 11 13:05:38 2022 -0400 moved the notebook files into their own directory commit 878ef2e9e095ab08d00532f8a19556b8949b2dbb Author: Lincoln Stein Date: Sun Sep 11 12:58:06 2022 -0400 documentation tweaks commit 4923118610ecaced2a670d108aef81c220d3507a Merge: 16f6a67 defafc0 Author: Lincoln Stein Date: Sun Sep 11 12:51:25 2022 -0400 Merge branch 'development' of github.com:lstein/stable-diffusion into development commit defafc0e8e0e69b39fd13db12036e1d01e7a19f1 Author: Dominic Letz Date: Sun Sep 11 18:51:01 2022 +0200 Enable upscaling on m1 (#474) commit 16f6a6731d80fcc04dcdb693d74fc5c21e753c10 Author: Lincoln Stein Date: Sun Sep 11 12:47:26 2022 -0400 install GFPGAN inside SD repository in order to fix 'dark cast' issue #169 commit 0881d429f2ddcd288aa673b2b5e9435a8a44371a Author: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Mon Sep 12 03:52:43 2022 +1200 Docs Update (#466) Authored-by: @blessedcoolant Co-authored-by: Lincoln Stein commit 9a29d442b437d650bd42516bbb24ebbcd0d6cd74 Author: Gérald LONLAS Date: Sun Sep 11 23:23:18 2022 +0800 Revert "Add 3x Upscale option on the Web UI (#442)" (#488) This reverts commit f8a540881c79ae657dc05b47bc71f8648e9f9782. commit d301836fbdfce0a3f12b19ae6415e7ae14f53ed2 Author: Lincoln Stein Date: Sun Sep 11 10:52:19 2022 -0400 can select prior output for init_img using -1, -2, etc commit 70aa674e9e10d03eb462249764695ef1d4e1e28c Author: Lincoln Stein Date: Sun Sep 11 10:34:06 2022 -0400 merge PR #495 - keep using float16 in ldm.modules.attention commit 8748370f44e28b104fbaa23b4e2e54e64102d799 Author: Lincoln Stein Date: Sun Sep 11 10:22:32 2022 -0400 negative -S indexing recovers correct previous seed; closes issue #476 commit 839e30e4b8ca6554017fbab671bdf85fadf9a6ea Author: Lincoln Stein Date: Sun Sep 11 10:02:44 2022 -0400 improve CUDA VRAM monitoring extra check that device==cuda before getting VRAM stats commit bfb278127923fbd461c4549a4b7f2f2c1dd34b8c Author: tildebyte <337875+tildebyte@users.noreply.github.com> Date: Sat Sep 10 10:15:56 2022 -0400 fix(readme): add note about updating env via conda (#475) commit 5c439888626145f94db1fdb00f5787ad27b64602 Author: Lincoln Stein Date: Sat Sep 10 10:02:43 2022 -0400 reduce VRAM memory usage by half during model loading * This moves the call to half() before model.to(device) to avoid GPU copy of full model. 
Improves speed and reduces memory usage dramatically * This fix contributed by @mh-dm (Mihai) commit 99122708ca3342e00063c687f149c950cfd87200 Merge: 817c4a2 ecc6b75 Author: Lincoln Stein Date: Sat Sep 10 09:54:34 2022 -0400 Merge branch 'development' of github.com:lstein/stable-diffusion into development commit 817c4a26de0d01b109550e6db9d4c3ece9f37c1b Author: Lincoln Stein Date: Sat Sep 10 09:53:27 2022 -0400 remove -F option from normalized prompt; closes #483 commit ecc6b75a3ede6d1d2850d69e998c92c342efdf2d Author: Lincoln Stein Date: Sat Sep 10 09:53:27 2022 -0400 remove -F option from normalized prompt commit 723d07444205a9c3da96926630c1dc705db3f130 Author: Lincoln Stein Date: Fri Sep 9 18:49:51 2022 -0400 Allow ctrl c when using --from_file (#472) * added ansi escapes to highlight key parts of CLI session * adjust exception handling so that ^C will abort when reading prompts from a file commit 75f633cda887d7bfcca3ef529d25c52461e11d99 Author: Lincoln Stein Date: Fri Sep 9 12:03:45 2022 -0400 re-add new logo commit 10db192cc4be66b3cebbdaa48a1806807578b56f Author: Lincoln Stein Date: Fri Sep 9 09:26:10 2022 -0400 changes to dogettx optimizations to run on m1 * Author @any-winter-4079 * Author @dogettx Thanks to many individuals who contributed time and hardware to benchmarking and debugging these changes. commit c85ae00b33d619ab5448246ecda6c8e40d66fa3e Author: Lincoln Stein Date: Thu Sep 8 23:57:45 2022 -0400 fix bug which caused seed to get "stuck" on previous image even when UI specified -1 commit 1b5aae3ef3218b3f07b9ec48ce72589c0ad33746 Author: Lincoln Stein Date: Thu Sep 8 22:36:47 2022 -0400 add icon to dream web server commit 6abf739315ef83202ff5ad2144888f79f480d88d Author: Lincoln Stein Date: Thu Sep 8 22:25:09 2022 -0400 add favicon to web server commit db825b813805b7428465e42377d756009e09e836 Merge: 33874ba afee7f9 Author: Lincoln Stein Date: Thu Sep 8 22:17:37 2022 -0400 Merge branch 'deNULL-development' into development commit 33874bae8db71dcdb5525826a1ec93b105e841ad Author: Lincoln Stein Date: Thu Sep 8 22:16:29 2022 -0400 Squashed commit of the following: commit afee7f9cea2a73a3d62ced667e88aa0fe15020e4 Merge: 6531446 171f8db Author: Lincoln Stein Date: Thu Sep 8 22:14:32 2022 -0400 Merge branch 'development' of github.com:deNULL/stable-diffusion into deNULL-development commit 171f8db742f18532b6fa03cdfbf4be2bbf6cf3ad Author: Denis Olshin Date: Thu Sep 8 03:15:20 2022 +0300 saving full prompt to metadata when using web ui commit d7e67b62f0ea9b7c8394b7c48786f5cf9c6f9e94 Author: Denis Olshin Date: Thu Sep 8 01:51:47 2022 +0300 better logic for clicking to make variations commit afee7f9cea2a73a3d62ced667e88aa0fe15020e4 Merge: 6531446 171f8db Author: Lincoln Stein Date: Thu Sep 8 22:14:32 2022 -0400 Merge branch 'development' of github.com:deNULL/stable-diffusion into deNULL-development commit 653144694fbb928d387c615c013ab0f2f1d5ca7f Author: Lincoln Stein Date: Thu Sep 8 20:41:37 2022 -0400 work around unexplained crash when timesteps=1000 (#440) * work around unexplained crash when timesteps=1000 * this fix seems to work commit c33a84cdfdb861a77916cd499e561d4c68ee192a Author: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Fri Sep 9 12:39:51 2022 +1200 Add New Logo (#454) * Add instructions on how to install alongside pyenv (#393) Like probably many others, I have a lot of different virtualenvs, one for each project. Most of them are handled by `pyenv`. 
After installing according to these instructions I had issues with ´pyenv`and `miniconda` fighting over the $PATH of my system. But then I stumbled upon this nice solution on SO: https://stackoverflow.com/a/73139031 , upon which I have based my suggested changes. It runs perfectly on my M1 setup, with the anaconda setup as a virtual environment handled by pyenv. Feel free to incorporate these instructions as you see fit. Thanks a million for all your hard work. * Disabled debug output (#436) Co-authored-by: Henry van Megen * Add New Logo Co-authored-by: Håvard Gulldahl Co-authored-by: Henry van Megen Co-authored-by: Henry van Megen Co-authored-by: Lincoln Stein commit f8a540881c79ae657dc05b47bc71f8648e9f9782 Author: Gérald LONLAS Date: Fri Sep 9 01:45:54 2022 +0800 Add 3x Upscale option on the Web UI (#442) commit 244239e5f656e1f34830b8e8ce99a40decbea324 Author: James Reynolds Date: Thu Sep 8 05:36:33 2022 -0600 macOS CI workflow, dream.py exits with an error, but the workflow com… (#396) * macOS CI workflow, dream.py exits with an error, but the workflow completes. * Files for testing Co-authored-by: James Reynolds Co-authored-by: Lincoln Stein commit 711d49ed30a0741558ed06d6be38680e00272774 Author: James Reynolds Date: Thu Sep 8 05:35:08 2022 -0600 Cache model workflow (#394) * Add workflow that caches the model, step 1 for CI * Change name of workflow job Co-authored-by: James Reynolds Co-authored-by: Lincoln Stein commit 7996a30e3aea1ae9611bbce6e6efaac60aeb95d4 Author: Lincoln Stein Date: Thu Sep 8 07:34:03 2022 -0400 add auto-creation of mask for inpainting (#438) * now use a single init image for both image and mask * turn on debugging for now to write out mask and image * add back -M option as a fallback commit a69ca31f349ddcf4c94fd009dc896f4e653f7fa4 Author: elliotsayes Date: Thu Sep 8 15:30:06 2022 +1200 .gitignore WebUI temp files (#430) * Add instructions on how to install alongside pyenv (#393) Like probably many others, I have a lot of different virtualenvs, one for each project. Most of them are handled by `pyenv`. After installing according to these instructions I had issues with ´pyenv`and `miniconda` fighting over the $PATH of my system. But then I stumbled upon this nice solution on SO: https://stackoverflow.com/a/73139031 , upon which I have based my suggested changes. It runs perfectly on my M1 setup, with the anaconda setup as a virtual environment handled by pyenv. Feel free to incorporate these instructions as you see fit. Thanks a million for all your hard work. 
* .gitignore WebUI temp files Co-authored-by: Håvard Gulldahl commit 5c6b612a722ff9cde1a5ddf9b29874842f1d5a26 Author: Lincoln Stein Date: Wed Sep 7 22:50:55 2022 -0400 fix bug that caused same seed to be redisplayed repeatedly commit 56f155c5907224b4276adb6ba01bd5c1a3401ee3 Author: Johan Roxendal Date: Thu Sep 8 04:50:06 2022 +0200 added support for parsing run log and displaying images in the frontend init state (#410) Co-authored-by: Johan Roxendal Co-authored-by: Lincoln Stein commit 41687746be5290a4c3d3437957307666d956ae9d Author: Lincoln Stein Date: Wed Sep 7 20:24:35 2022 -0400 added missing initialization of latent_noise to None commit 171f8db742f18532b6fa03cdfbf4be2bbf6cf3ad Author: Denis Olshin Date: Thu Sep 8 03:15:20 2022 +0300 saving full prompt to metadata when using web ui commit d7e67b62f0ea9b7c8394b7c48786f5cf9c6f9e94 Author: Denis Olshin Date: Thu Sep 8 01:51:47 2022 +0300 better logic for clicking to make variations commit d1d044aa87cf8ba95a7e2e553c7fd993ec81a6d7 Author: Lincoln Stein Date: Wed Sep 7 17:56:59 2022 -0400 actual image seed now written into web log rather than -1 (#428) commit edada042b318028c77ab50dfbaa0b2671cc69e61 Author: Arturo Mendivil <60411196+artmen1516@users.noreply.github.com> Date: Wed Sep 7 10:42:26 2022 -0700 Improve notebook and add requirements file (#422) commit 29ab3c20280bfa73b9a89c8bd9dc99dc0ad7b651 Author: Lincoln Stein Date: Wed Sep 7 13:28:11 2022 -0400 disable neonpixel optimizations on M1 hardware (#414) * disable neonpixel optimizations on M1 hardware * fix typo that was causing random noise images on m1 commit 7670ecc63f3e30e320e2c4197eb7140c6196c168 Author: cody Date: Wed Sep 7 12:24:41 2022 -0500 add more keyboard support on the web server (#391) add ability to submit prompts with the "enter" key add ability to cancel generations with the "escape" key commit dd2aedacaf27d8fe750a342c310bc88de5311931 Author: Lincoln Stein Date: Wed Sep 7 13:23:53 2022 -0400 report VRAM usage stats during initial model loading (#419) commit f6284777e6d79bd3d1e85b83aa72d774299a7403 Author: Lincoln Stein Date: Tue Sep 6 17:12:39 2022 -0400 Squashed commit of the following: commit 7d1344282d942a33dcecda4d5144fc154ec82915 Merge: caf4ea3 ebeb556 Author: Lincoln Stein Date: Mon Sep 5 10:07:27 2022 -0400 Merge branch 'development' of github.com:WebDev9000/stable-diffusion into WebDev9000-development commit ebeb556af9c99b491a83c72f83512683a02a82ad Author: Web Dev 9000 Date: Sun Sep 4 18:05:15 2022 -0700 Fixed unintentionally removed lines commit ff2c4b9a1b773b95686d5f3e546e1194de054694 Author: Web Dev 9000 Date: Sun Sep 4 17:50:13 2022 -0700 Add ability to recreate variations via image click commit c012929cdae7c37aa3b3b4fa2e7de465458f732a Author: Web Dev 9000 Date: Sun Sep 4 14:35:33 2022 -0700 Add files via upload commit 02a601899214adfe4536ce0ba67694a46319fd51 Author: Web Dev 9000 Date: Sun Sep 4 14:35:07 2022 -0700 Add files via upload commit eef788981cbed7c68ffd58b4eb22a2df2e59ae0b Author: Olivier Louvignes Date: Tue Sep 6 12:41:08 2022 +0200 feat(txt2img): allow from_file to work with len(lines) < batch_size (#349) commit 720e5cd6513cd27e6d53feb6475dde20bd39841a Author: Lincoln Stein Date: Mon Sep 5 20:40:10 2022 -0400 Refactoring simplet2i (#387) * start refactoring -not yet functional * first phase of refactor done - not sure weighted prompts working * Second phase of refactoring. Everything mostly working. * The refactoring has moved all the hard-core inference work into ldm.dream.generator.*, where there are submodules for txt2img and img2img. 
inpaint will go in there as well. * Some additional refactoring will be done soon, but relatively minor work. * fix -save_orig flag to actually work * add @neonsecret attention.py memory optimization * remove unneeded imports * move token logging into conditioning.py * add placeholder version of inpaint; porting in progress * fix crash in img2img * inpainting working; not tested on variations * fix crashes in img2img * ported attention.py memory optimization #117 from basujindal branch * added @torch_no_grad() decorators to img2img, txt2img, inpaint closures * Final commit prior to PR against development * fixup crash when generating intermediate images in web UI * rename ldm.simplet2i to ldm.generate * add backward-compatibility simplet2i shell with deprecation warning * add back in mps exception, addresses @vargol comment in #354 * replaced Conditioning class with exported functions * fix wrong type of with_variations attribute during intialization * changed "image_iterator()" to "get_make_image()" * raise NotImplementedError for calling get_make_image() in parent class * Update ldm/generate.py better error message Co-authored-by: Kevin Gibbons * minor stylistic fixes and assertion checks from code review * moved get_noise() method into img2img class * break get_noise() into two methods, one for txt2img and the other for img2img * inpainting works on non-square images now * make get_noise() an abstract method in base class * much improved inpainting Co-authored-by: Kevin Gibbons commit 1ad2a8e567b054cfe9df1715aa805218ee185754 Author: thealanle <35761977+thealanle@users.noreply.github.com> Date: Mon Sep 5 17:35:04 2022 -0700 Fix --outdir function for web (#373) * Fix --outdir function for web * Removed unnecessary hardcoded path commit 52d8bb2836cf05994ee5e2c5cf9c8d190dac0524 Author: Lincoln Stein Date: Mon Sep 5 10:31:59 2022 -0400 Squashed commit of the following: commit 0cd48e932f1326e000c46f4140f98697eb9bdc79 Author: Lincoln Stein Date: Mon Sep 5 10:27:43 2022 -0400 resolve conflicts with development commit d7bc8c12e05535a363ac7c745a3f3abc2773bfcf Author: Scott McMillin Date: Sun Sep 4 18:52:09 2022 -0500 Add title attribute back to img tag commit 5397c89184ebfb8260bc2d8c3f23e73e103d24e6 Author: Scott McMillin Date: Sun Sep 4 13:49:46 2022 -0500 Remove temp code commit 1da080b50972696db2930681a09cb1c14e524758 Author: Scott McMillin Date: Sun Sep 4 13:33:56 2022 -0500 Cleaned up HTML; small style changes; image click opens image; add seed to figcaption beneath image commit caf4ea3d8982416dcf5a80fe4601ac4fbc126cc0 Author: Adam Rice Date: Mon Sep 5 10:05:39 2022 -0400 Add a 'Remove Image' button to clear the file upload field (#382) * added "remove image" button * styled a new "remove image" button * Update index.js commit 95c088b30342c75ec2ab8c7d7a423ffd11c50099 Author: Kevin Gibbons Date: Sun Sep 4 19:04:14 2022 -0700 Revert "Add CORS headers to dream server to ease integration with third-party web interfaces" (#371) This reverts commit 91e826e5f425333674d1e3bec1fa1ac63cfb382d. 
commit a20113d5a3985a23b7e19301acb57688e31e975c Author: Kevin Gibbons Date: Sun Sep 4 18:59:12 2022 -0700 put no_grad decorator on make_image closures (#375) commit 0f93dadd6ac5aa0fbeee5d72150def775752a153 Author: Lincoln Stein Date: Sun Sep 4 21:39:15 2022 -0400 fix several dangling references to --gfpgan option, which no longer exists commit f4004f660e5daba721426cfcd3fe95318fd10bc3 Author: tildebyte <337875+tildebyte@users.noreply.github.com> Date: Sun Sep 4 19:43:04 2022 -0400 TOIL(requirements): Split requirements to per-platform (#355) * toil(reqs): split requirements to per-platform Signed-off-by: Ben Alkov * toil(reqs): fix for Win and Lin... ...allow pip to resolve latest torch, numpy Signed-off-by: Ben Alkov * toil(install): update reqs in Win install notebook Signed-off-by: Ben Alkov Signed-off-by: Ben Alkov commit 4406fd138dec0e25409aeaa2b716f88dd95b76d1 Merge: 5116c81 fd7a72e Author: Lincoln Stein Date: Sun Sep 4 08:23:53 2022 -0400 Merge branch 'SebastianAigner-main' into development Add support for full CORS headers for dream server. commit fd7a72e147393f32fc40d8f5918ea9bf1401e723 Author: Lincoln Stein Date: Sun Sep 4 08:23:11 2022 -0400 remove debugging message commit 3a2be621f36e66b16e60b7f4f9210babfe84c582 Merge: 91e826e 5116c81 Author: Lincoln Stein Date: Sun Sep 4 08:15:51 2022 -0400 Merge branch 'development' into main commit 5116c8178c67f550e57f5d16fe931ee1a7cdb0ba Author: Justin Wong <1584142+wongjustin99@users.noreply.github.com> Date: Sun Sep 4 07:17:58 2022 -0400 fix save_original flag saving to the same filename (#360) * Update README.md with new Anaconda install steps (#347) pip3 version did not work for me and this is the recommended way to install Anaconda now it seems * fix save_original flag saving to the same filename Before this, the `--save_orig` flag was not working. The upscaled/GFPGAN would overwrite the original output image. Co-authored-by: greentext2 <112735219+greentext2@users.noreply.github.com> commit 91e826e5f425333674d1e3bec1fa1ac63cfb382d Author: Sebastian Aigner Date: Sun Sep 4 10:22:54 2022 +0200 Add CORS headers to dream server to ease integration with third-party web interfaces commit 6266d9e8d6421ee732338560f825771e461cefb0 Author: Lincoln Stein Date: Sat Sep 3 15:45:20 2022 -0400 remove stray debugging message commit 138956e5162679f6894ce75462907c9eeed83cbb Author: greentext2 <112735219+greentext2@users.noreply.github.com> Date: Sat Sep 3 13:38:57 2022 -0500 Update README.md with new Anaconda install steps (#347) pip3 version did not work for me and this is the recommended way to install Anaconda now it seems commit 60be735e802a1c3cd2812c5d8e63f9ed467ea9d9 Author: Cora Johnson-Roberson Date: Sat Sep 3 14:28:34 2022 -0400 Switch to regular pytorch channel and restore Python 3.10 for Macs. (#301) * Switch to regular pytorch channel and restore Python 3.10 for Macs. Although pytorch-nightly should in theory be faster, it is currently causing increased memory usage and slower iterations: https://github.com/lstein/stable-diffusion/pull/283#issuecomment-1234784885 This changes the environment-mac.yaml file back to the regular pytorch channel and moves the `transformers` dep into pip for now (since it cannot be satisfied until tokenizers>=0.11 is built for Python 3.10). * Specify versions for Pip packages as well. 
commit d0d95d3a2a4b7a91c5c4f570d88af43a2c3afe75 Author: Lincoln Stein Date: Sat Sep 3 14:10:31 2022 -0400 make initimg appear in web log commit b90a21500037f07bb1b5d143045253ee6bc67391 Merge: 1eee811 6270e31 Author: Lincoln Stein Date: Sat Sep 3 13:47:15 2022 -0400 Merge branch 'prixt-seamless' into development commit 6270e313b8d87b33cb914f12558e34bc2f0ae357 Author: Lincoln Stein Date: Sat Sep 3 13:46:29 2022 -0400 add credit to prixt for seamless circular tiling commit a01b7bdc40af5376177de30b76dc075b523b3450 Merge: 1eee811 9d88abe Author: Lincoln Stein Date: Sat Sep 3 13:43:04 2022 -0400 add web interface for seamless option commit 1eee8111b95241f54b49f58605ab343a52325b89 Merge: 64eca42 fb857f0 Author: Lincoln Stein Date: Sat Sep 3 12:33:39 2022 -0400 Merge branch 'development' of github.com:lstein/stable-diffusion into development commit 64eca42610b92cb73a30c405ab9dad28990c15e1 Merge: 9130ad7 21a1f68 Author: Lincoln Stein Date: Sat Sep 3 12:33:05 2022 -0400 Merge branch 'main' into development * brings in small documentation fixes that were added directly to main during release tweaking. commit fb857f05ba0eda5cf9bbe0f60b73a73d75562d85 Author: Lincoln Stein Date: Sat Sep 3 12:07:07 2022 -0400 fix typo in docs commit 9d88abe2ea1fed6231ffd822956614589a1075b7 Author: prixt Date: Sat Sep 3 22:42:16 2022 +0900 fixed typo commit a61e49bc974af0fc01c8424d7df9262f63ecf289 Author: prixt Date: Sat Sep 3 22:39:35 2022 +0900 * Removed unnecessary code * Added description about --seamless commit 02bee4fdb1534b71c5e609204506efb66699b2bc Author: prixt Date: Sat Sep 3 16:08:03 2022 +0900 added --seamless tag logging to normalize_prompt commit d922b53c26f3e9a11ecb920536b9632ec69df5f6 Author: prixt Date: Sat Sep 3 15:13:31 2022 +0900 added seamless tiling mode and commands --- .github/workflows/cache-model.yml | 64 ++ .github/workflows/macos12-miniconda.yml | 80 ++ .gitignore | 3 + README.md | 791 ++------------- configs/stable-diffusion/v1-finetune.yaml | 3 +- CHANGELOG.md => docs/CHANGELOG.md | 2 +- docs/CONTRIBUTORS.md | 61 ++ README-CompViz.md => docs/README-CompViz.md | 90 +- {static => docs/assets}/colab_notebook.png | Bin {static => docs/assets}/dream-py-demo.png | Bin {static => docs/assets}/dream_web_server.png | Bin docs/assets/logo.png | Bin 0 -> 22220 bytes .../variation_walkthru/000001.3357757885.png | Bin .../variation_walkthru/000002.1614299449.png | Bin .../variation_walkthru/000002.3647897225.png | Bin .../variation_walkthru/000003.1614299449.png | Bin .../variation_walkthru/000004.3747154981.png | Bin docs/features/CLI.md | 228 +++++ docs/features/IMG2IMG.md | 30 + docs/features/INPAINTING.md | 41 + docs/features/OTHER.md | 133 +++ docs/features/TEXTUAL_INVERSION.md | 70 ++ docs/features/UPSCALE.md | 105 ++ VARIATIONS.md => docs/features/VARIATIONS.md | 99 +- docs/features/WEB.md | 13 + docs/help/TROUBLESHOOT.md | 68 ++ docs/installation/INSTALL_LINUX.md | 89 ++ .../installation/INSTALL_MAC.md | 123 +-- docs/installation/INSTALL_WINDOWS.md | 112 +++ environment-mac.yaml | 34 +- environment.yaml | 6 +- ldm/dream/conditioning.py | 96 ++ ldm/dream/devices.py | 11 +- ldm/dream/generator/__init__.py | 4 + ldm/dream/generator/base.py | 158 +++ ldm/dream/generator/img2img.py | 72 ++ ldm/dream/generator/inpaint.py | 77 ++ ldm/dream/generator/txt2img.py | 61 ++ ldm/dream/pngwriter.py | 6 +- ldm/dream/readline.py | 8 +- ldm/dream/server.py | 165 +-- ldm/generate.py | 695 +++++++++++++ ldm/gfpgan/gfpgan_tools.py | 100 +- ldm/models/diffusion/ddim.py | 31 +- ldm/modules/attention.py | 280 +++--- 
ldm/modules/diffusionmodules/model.py | 939 ++++++++---------- ldm/modules/diffusionmodules/util.py | 4 +- ldm/modules/embedding_manager.py | 27 +- ldm/simplet2i.py | 851 +--------------- .../Stable-Diffusion-local-Windows.ipynb | 16 +- .../Stable_Diffusion_AI_Notebook.ipynb | 131 ++- .../notebook_helpers.py | 0 requirements-colab.txt | 26 + requirements-lin.txt | 33 + requirements.txt => requirements-mac.txt | 3 +- requirements-win.txt | 33 + scripts/dream.py | 152 ++- scripts/orig_scripts/txt2img.py | 11 +- static/dream_web/favicon.ico | Bin 0 -> 1150 bytes static/dream_web/index.css | 91 +- static/dream_web/index.html | 177 ++-- static/dream_web/index.js | 70 +- static/logo_temp.png | Bin 35209 -> 0 bytes tests/prompts.txt | 1 + 64 files changed, 3836 insertions(+), 2738 deletions(-) create mode 100644 .github/workflows/cache-model.yml create mode 100644 .github/workflows/macos12-miniconda.yml rename CHANGELOG.md => docs/CHANGELOG.md (99%) create mode 100644 docs/CONTRIBUTORS.md rename README-CompViz.md => docs/README-CompViz.md (83%) rename {static => docs/assets}/colab_notebook.png (100%) rename {static => docs/assets}/dream-py-demo.png (100%) rename {static => docs/assets}/dream_web_server.png (100%) create mode 100644 docs/assets/logo.png rename {static => docs/assets}/variation_walkthru/000001.3357757885.png (100%) rename {static => docs/assets}/variation_walkthru/000002.1614299449.png (100%) rename {static => docs/assets}/variation_walkthru/000002.3647897225.png (100%) rename {static => docs/assets}/variation_walkthru/000003.1614299449.png (100%) rename {static => docs/assets}/variation_walkthru/000004.3747154981.png (100%) create mode 100644 docs/features/CLI.md create mode 100644 docs/features/IMG2IMG.md create mode 100644 docs/features/INPAINTING.md create mode 100644 docs/features/OTHER.md create mode 100644 docs/features/TEXTUAL_INVERSION.md create mode 100644 docs/features/UPSCALE.md rename VARIATIONS.md => docs/features/VARIATIONS.md (58%) create mode 100644 docs/features/WEB.md create mode 100644 docs/help/TROUBLESHOOT.md create mode 100644 docs/installation/INSTALL_LINUX.md rename README-Mac-MPS.md => docs/installation/INSTALL_MAC.md (70%) create mode 100644 docs/installation/INSTALL_WINDOWS.md create mode 100644 ldm/dream/conditioning.py create mode 100644 ldm/dream/generator/__init__.py create mode 100644 ldm/dream/generator/base.py create mode 100644 ldm/dream/generator/img2img.py create mode 100644 ldm/dream/generator/inpaint.py create mode 100644 ldm/dream/generator/txt2img.py create mode 100644 ldm/generate.py rename Stable-Diffusion-local-Windows.ipynb => notebooks/Stable-Diffusion-local-Windows.ipynb (92%) rename Stable_Diffusion_AI_Notebook.ipynb => notebooks/Stable_Diffusion_AI_Notebook.ipynb (76%) rename notebook_helpers.py => notebooks/notebook_helpers.py (100%) create mode 100644 requirements-colab.txt create mode 100644 requirements-lin.txt rename requirements.txt => requirements-mac.txt (87%) create mode 100644 requirements-win.txt create mode 100644 static/dream_web/favicon.ico delete mode 100644 static/logo_temp.png create mode 100644 tests/prompts.txt diff --git a/.github/workflows/cache-model.yml b/.github/workflows/cache-model.yml new file mode 100644 index 0000000000..2682943eef --- /dev/null +++ b/.github/workflows/cache-model.yml @@ -0,0 +1,64 @@ +name: Cache Model +on: + workflow_dispatch +jobs: + build: + strategy: + matrix: + os: [ macos-12 ] + name: Create Caches using ${{ matrix.os }} + runs-on: ${{ matrix.os }} + steps: + - name: Checkout 
sources + uses: actions/checkout@v3 + - name: Cache model + id: cache-sd-v1-4 + uses: actions/cache@v3 + env: + cache-name: cache-sd-v1-4 + with: + path: models/ldm/stable-diffusion-v1/model.ckpt + key: ${{ env.cache-name }} + restore-keys: | + ${{ env.cache-name }} + - name: Download Stable Diffusion v1.4 model + if: ${{ steps.cache-sd-v1-4.outputs.cache-hit != 'true' }} + continue-on-error: true + run: | + if [ ! -e models/ldm/stable-diffusion-v1 ]; then + mkdir -p models/ldm/stable-diffusion-v1 + fi + if [ ! -e models/ldm/stable-diffusion-v1/model.ckpt ]; then + curl -o models/ldm/stable-diffusion-v1/model.ckpt ${{ secrets.SD_V1_4_URL }} + fi +# Uncomment this when we no longer make changes to environment-mac.yaml +# - name: Cache environment +# id: cache-conda-env-ldm +# uses: actions/cache@v3 +# env: +# cache-name: cache-conda-env-ldm +# with: +# path: ~/.conda/envs/ldm +# key: ${{ env.cache-name }} +# restore-keys: | +# ${{ env.cache-name }} + - name: Install dependencies +# if: ${{ steps.cache-conda-env-ldm.outputs.cache-hit != 'true' }} + run: | + conda env create -f environment-mac.yaml + - name: Cache hugginface and torch models + id: cache-hugginface-torch + uses: actions/cache@v3 + env: + cache-name: cache-hugginface-torch + with: + path: ~/.cache + key: ${{ env.cache-name }} + restore-keys: | + ${{ env.cache-name }} + - name: Download Huggingface and Torch models + if: ${{ steps.cache-hugginface-torch.outputs.cache-hit != 'true' }} + continue-on-error: true + run: | + export PYTHON_BIN=/usr/local/miniconda/envs/ldm/bin/python + $PYTHON_BIN scripts/preload_models.py \ No newline at end of file diff --git a/.github/workflows/macos12-miniconda.yml b/.github/workflows/macos12-miniconda.yml new file mode 100644 index 0000000000..18f21277c0 --- /dev/null +++ b/.github/workflows/macos12-miniconda.yml @@ -0,0 +1,80 @@ +name: Build +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] +jobs: + build: + strategy: + matrix: + os: [ macos-12 ] + name: Build on ${{ matrix.os }} miniconda + runs-on: ${{ matrix.os }} + steps: + - name: Checkout sources + uses: actions/checkout@v3 + - name: Cache model + id: cache-sd-v1-4 + uses: actions/cache@v3 + env: + cache-name: cache-sd-v1-4 + with: + path: models/ldm/stable-diffusion-v1/model.ckpt + key: ${{ env.cache-name }} + restore-keys: | + ${{ env.cache-name }} + - name: Download Stable Diffusion v1.4 model + if: ${{ steps.cache-sd-v1-4.outputs.cache-hit != 'true' }} + continue-on-error: true + run: | + if [ ! -e models/ldm/stable-diffusion-v1 ]; then + mkdir -p models/ldm/stable-diffusion-v1 + fi + if [ ! 
-e models/ldm/stable-diffusion-v1/model.ckpt ]; then + curl -o models/ldm/stable-diffusion-v1/model.ckpt ${{ secrets.SD_V1_4_URL }} + fi +# Uncomment this when we no longer make changes to environment-mac.yaml +# - name: Cache environment +# id: cache-conda-env-ldm +# uses: actions/cache@v3 +# env: +# cache-name: cache-conda-env-ldm +# with: +# path: ~/.conda/envs/ldm +# key: ${{ env.cache-name }} +# restore-keys: | +# ${{ env.cache-name }} + - name: Install dependencies +# if: ${{ steps.cache-conda-env-ldm.outputs.cache-hit != 'true' }} + run: | + conda env create -f environment-mac.yaml + - name: Cache hugginface and torch models + id: cache-hugginface-torch + uses: actions/cache@v3 + env: + cache-name: cache-hugginface-torch + with: + path: ~/.cache + key: ${{ env.cache-name }} + restore-keys: | + ${{ env.cache-name }} + - name: Download Huggingface and Torch models + if: ${{ steps.cache-hugginface-torch.outputs.cache-hit != 'true' }} + continue-on-error: true + run: | + export PYTHON_BIN=/usr/local/miniconda/envs/ldm/bin/python + $PYTHON_BIN scripts/preload_models.py + - name: Run the tests + run: | + # Note, can't "activate" via automation, and activation is just env vars and path + export PYTHON_BIN=/usr/local/miniconda/envs/ldm/bin/python + export PYTORCH_ENABLE_MPS_FALLBACK=1 + $PYTHON_BIN scripts/preload_models.py + mkdir -p outputs/img-samples + time $PYTHON_BIN scripts/dream.py --from_file tests/prompts.txt outputs/img-samples/err.log > outputs/img-samples/out.log + - name: Archive results + uses: actions/upload-artifact@v3 + with: + name: results + path: outputs/img-samples \ No newline at end of file diff --git a/.gitignore b/.gitignore index fd75e65a48..df1e55ee6d 100644 --- a/.gitignore +++ b/.gitignore @@ -77,6 +77,9 @@ db.sqlite3-journal instance/ .webassets-cache +# WebUI temp files: +img2img-tmp.png + # Scrapy stuff: .scrapy diff --git a/README.md b/README.md index a171a2bea7..1737e6515b 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@

Stable Diffusion Dream Script


@@ -12,770 +12,144 @@ pull-requests

-This is a fork of CompVis/stable-diffusion, the wonderful open source -text-to-image generator. This fork supports: +# **Stable Diffusion Dream Script** -1. An interactive command-line interface that accepts the same prompt - and switches as the Discord bot. +This is a fork of +[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion), +the open source text-to-image generator. It provides a streamlined +process with various new features and options to aid the image +generation process. It runs on Windows, Mac and Linux machines, +and runs on GPU cards with as little as 4 GB of RAM. -2. A basic Web interface that allows you to run a local web server for - generating images in your browser. +_Note: This fork is rapidly evolving. Please use the +[Issues](https://github.com/lstein/stable-diffusion/issues) tab to +report bugs and make feature requests. Be sure to use the provided +templates. They will help diagnose issues faster._ -3. Support for img2img in which you provide a seed image to guide the - image creation. (inpainting & masking coming soon) +# **Table of Contents** -4. A notebook for running the code on Google Colab. - -5. Upscaling and face fixing using the optional ESRGAN and GFPGAN - packages. - -6. Weighted subprompts for prompt tuning. - -7. [Image variations](VARIATIONS.md) which allow you to systematically -generate variations of an image you like and combine two or more -images together to combine the best features of both. - -8. Textual inversion for customization of the prompt language and images. - -8. ...and more! - -This fork is rapidly evolving, so use the Issues panel to report bugs -and make feature requests, and check back periodically for -improvements and bug fixes. - -# Table of Contents - -1. [Major Features](#features) -2. [Changelog](#latest-changes) -3. [Installation](#installation) - 1. [Linux](#linux) - 1. [Windows](#windows) - 1. [MacOS](README-Mac-MPS.md) +1. [Installation](#installation) +2. [Major Features](#features) +3. [Changelog](#latest-changes) 4. [Troubleshooting](#troubleshooting) 5. [Contributing](#contributing) 6. [Support](#support) -# Features +# Installation -## Interactive command-line interface similar to the Discord bot +This fork is supported across multiple platforms. You can find individual installation instructions below. -The _dream.py_ script, located in scripts/dream.py, -provides an interactive interface to image generation similar to -the "dream mothership" bot that Stable AI provided on its Discord -server. Unlike the txt2img.py and img2img.py scripts provided in the -original CompViz/stable-diffusion source code repository, the -time-consuming initialization of the AI model -initialization only happens once. After that image generation -from the command-line interface is very fast. +- ## [Linux](docs/installation/INSTALL_LINUX.md) +- ## [Windows](docs/installation/INSTALL_WINDOWS.md) +- ## [Macintosh](docs/installation/INSTALL_MAC.md) -The script uses the readline library to allow for in-line editing, -command history (up and down arrows), autocompletion, and more. To help -keep track of which prompts generated which images, the script writes a -log file of image names and prompts to the selected output directory. -In addition, as of version 1.02, it also writes the prompt into the PNG -file's metadata where it can be retrieved using scripts/images2prompt.py +## **Hardware Requirements** -The script is confirmed to work on Linux and Windows systems. It should -work on MacOSX as well, but this is not confirmed. Note that this script -runs from the command-line (CMD or Terminal window), and does not have a GUI. +**System** -``` -(ldm) ~/stable-diffusion$ python3 ./scripts/dream.py -* Initializing, be patient... -Loading model from models/ldm/text2img-large/model.ckpt -(...more initialization messages...) +You will need one of the following: -* Initialization done! Awaiting your command... -dream> ashley judd riding a camel -n2 -s150 -Outputs: - outputs/img-samples/00009.png: "ashley judd riding a camel" -n2 -s150 -S 416354203 - outputs/img-samples/00010.png: "ashley judd riding a camel" -n2 -s150 -S 1362479620 +- An NVIDIA-based graphics card with 4 GB or more VRAM memory. +- An Apple computer with an M1 chip. -dream> "there's a fly in my soup" -n6 -g - outputs/img-samples/00011.png: "there's a fly in my soup" -n6 -g -S 2685670268 - seeds for individual rows: [2685670268, 1216708065, 2335773498, 822223658, 714542046, 3395302430] -dream> q +**Memory** -# this shows how to retrieve the prompt stored in the saved image's metadata -(ldm) ~/stable-diffusion$ python3 ./scripts/images2prompt.py outputs/img_samples/*.png -00009.png: "ashley judd riding a camel" -s150 -S 416354203 -00010.png: "ashley judd riding a camel" -s150 -S 1362479620 -00011.png: "there's a fly in my soup" -n6 -g -S 2685670268 -``` +- At least 12 GB Main Memory RAM. -
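As the session above shows, the prompt is written into the PNG's text metadata and read back with `scripts/images2prompt.py`. The same chunks can also be dumped with a few lines of Pillow; a minimal sketch (assuming Pillow is installed in the `ldm` environment, and printing every text chunk rather than guessing the specific key name dream.py uses):

```python
from PIL import Image  # Pillow

def dump_png_metadata(path: str) -> None:
    """Print every text chunk stored in a PNG, e.g. the dream.py prompt line."""
    with Image.open(path) as img:
        for key, value in getattr(img, "text", {}).items():
            print(f"{key}: {value}")

dump_png_metadata("outputs/img-samples/00009.png")
```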


+**Disk** -The dream> prompt's arguments are pretty much identical to those used -in the Discord bot, except you don't need to type "!dream" (it doesn't -hurt if you do). A significant change is that creation of individual -images is now the default unless --grid (-g) is given. For backward -compatibility, the -i switch is recognized. For command-line help -type -h (or --help) at the dream> prompt. - -The script itself also recognizes a series of command-line switches -that will change important global defaults, such as the directory for -image outputs and the location of the model weight files. - -## Image-to-Image - -This script also provides an img2img feature that lets you seed your -creations with a drawing or photo. This is a really cool feature that tells -stable diffusion to build the prompt on top of the image you provide, preserving -the original's basic shape and layout. To use it, provide the --init_img -option as shown here: - -``` -dream> "waterfall and rainbow" --init_img=./init-images/crude_drawing.png --strength=0.5 -s100 -n4 -``` - -The --init_img (-I) option gives the path to the seed picture. --strength (-f) controls how much -the original will be modified, ranging from 0.0 (keep the original intact), to 1.0 (ignore the original -completely). The default is 0.75, and ranges from 0.25-0.75 give interesting results. - -You may also pass a -v option to generate count variants on the original image. This is done by -passing the first generated image back into img2img the requested number of times. It generates interesting -variants. - -## GFPGAN and Real-ESRGAN Support - -The script also provides the ability to do face restoration and -upscaling with the help of GFPGAN and Real-ESRGAN respectively. - -To use the ability, clone the **[GFPGAN -repository](https://github.com/TencentARC/GFPGAN)** and follow their -installation instructions. By default, we expect GFPGAN to be -installed in a 'GFPGAN' sibling directory. Be sure that the `"ldm"` -conda environment is active as you install GFPGAN. - -You can use the `--gfpgan_dir` argument with `dream.py` to set a -custom path to your GFPGAN directory. _There are other GFPGAN related -boot arguments if you wish to customize further._ - -You can install **Real-ESRGAN** by typing the following command. - -``` -pip install realesrgan -``` - -**Note: Internet connection needed:** -Users whose GPU machines are isolated from the Internet (e.g. on a -University cluster) should be aware that the first time you run -dream.py with GFPGAN and Real-ESRGAN turned on, it will try to -download model files from the Internet. To rectify this, you may run -`python3 scripts/preload_models.py` after you have installed GFPGAN -and all its dependencies. - -**Usage** - -You will now have access to two new prompt arguments. - -**Upscaling** - -`-U : ` - -The upscaling prompt argument takes two values. The first value is a -scaling factor and should be set to either `2` or `4` only. This will -either scale the image 2x or 4x respectively using different models. - -You can set the scaling stength between `0` and `1.0` to control -intensity of the of the scaling. This is handy because AI upscalers -generally tend to smooth out texture details. If you wish to retain -some of those for natural looking results, we recommend using values -between `0.5 to 0.8`. - -If you do not explicitly specify an upscaling_strength, it will -default to 0.75. 
- -**Face Restoration** - -`-G : ` - -This prompt argument controls the strength of the face restoration -that is being applied. Similar to upscaling, values between `0.5 to 0.8` are recommended. - -You can use either one or both without any conflicts. In cases where -you use both, the image will be first upscaled and then the face -restoration process will be executed to ensure you get the highest -quality facial features. - -`--save_orig` - -When you use either `-U` or `-G`, the final result you get is upscaled -or face modified. If you want to save the original Stable Diffusion -generation, you can use the `-save_orig` prompt argument to save the -original unaffected version too. - -**Example Usage** - -``` -dream > superman dancing with a panda bear -U 2 0.6 -G 0.4 -``` - -This also works with img2img: - -``` -dream> a man wearing a pineapple hat -I path/to/your/file.png -U 2 0.5 -G 0.6 -``` +- At least 6 GB of free disk space for the machine learning model, Python, and all its dependencies. **Note** -GFPGAN and Real-ESRGAN are both memory intensive. In order to avoid -crashes and memory overloads during the Stable Diffusion process, -these effects are applied after Stable Diffusion has completed its -work. +If you have an Nvidia 10xx series card (e.g. the 1080ti), please +run the dream script in full-precision mode as shown below. -In single image generations, you will see the output right away but -when you are using multiple iterations, the images will first be -generated and then upscaled and face restored after that process is -complete. While the image generation is taking place, you will still -be able to preview the base images. +Similarly, specify full-precision mode on Apple M1 hardware. -If you wish to stop during the image generation but want to upscale or -face restore a particular generated image, pass it again with the same -prompt and generated seed along with the `-U` and `-G` prompt -arguments to perform those actions. - -## Google Colab - -Stable Diffusion AI Notebook: Open In Colab
-Open and follow instructions to use an isolated environment running Dream.
- -Output example: -![Colab Notebook](static/colab_notebook.png) - -## Barebones Web Server - -As of version 1.10, this distribution comes with a bare bones web -server (see screenshot). To use it, run the _dream.py_ script by -adding the **--web** option. +To run in full-precision mode, start `dream.py` with the +`--full_precision` flag: ``` -(ldm) ~/stable-diffusion$ python3 scripts/dream.py --web +(ldm) ~/stable-diffusion$ python scripts/dream.py --full_precision ``` -You can then connect to the server by pointing your web browser at -http://localhost:9090, or to the network name or IP address of the server. +# Features -Kudos to [Tesseract Cat](https://github.com/TesseractCat) for -contributing this code, and to [dagf2101](https://github.com/dagf2101) -for refining it. +## **Major Features** -![Dream Web Server](static/dream_web_server.png) +- ## [Interactive Command Line Interface](docs/features/CLI.md) -## Reading Prompts from a File +- ## [Image To Image](docs/features/IMG2IMG.md) -You can automate dream.py by providing a text file with the prompts -you want to run, one line per prompt. The text file must be composed -with a text editor (e.g. Notepad) and not a word processor. Each line -should look like what you would type at the dream> prompt: +- ## [Inpainting Support](docs/features/INPAINTING.md) -``` -a beautiful sunny day in the park, children playing -n4 -C10 -stormy weather on a mountain top, goats grazing -s100 -innovative packaging for a squid's dinner -S137038382 -``` +- ## [GFPGAN and Real-ESRGAN Support](docs/features/UPSCALE.md) -Then pass this file's name to dream.py when you invoke it: +- ## [Seamless Tiling](docs/features/OTHER.md#seamless-tiling) -``` -(ldm) ~/stable-diffusion$ python3 scripts/dream.py --from_file "path/to/prompts.txt" -``` +- ## [Google Colab](docs/features/OTHER.md#google-colab) -You may read a series of prompts from standard input by providing a filename of "-": +- ## [Web Server](docs/features/WEB.md) -``` -(ldm) ~/stable-diffusion$ echo "a beautiful day" | python3 scripts/dream.py --from_file - -``` +- ## [Reading Prompts From File](docs/features/OTHER.md#reading-prompts-from-a-file) -## Shortcut for reusing seeds from the previous command +- ## [Shortcut: Reusing Seeds](docs/features/OTHER.md#shortcuts-reusing-seeds) -Since it is so common to reuse seeds while refining a prompt, there is -now a shortcut as of version 1.11. Provide a **-S** (or **--seed**) -switch of -1 to use the seed of the most recent image generated. If -you produced multiple images with the **-n** switch, then you can go -back further using -2, -3, etc. up to the first image generated by the -previous command. Sorry, but you can't go back further than one -command. +- ## [Weighted Prompts](docs/features/OTHER.md#weighted-prompts) -Here's an example of using this to do a quick refinement. It also -illustrates using the new **-G** switch to turn on upscaling and -face enhancement (see previous section): +- ## [Variations](docs/features/VARIATIONS.md) -``` -dream> a cute child playing hopscotch -G0.5 -[...] -outputs/img-samples/000039.3498014304.png: "a cute child playing hopscotch" -s50 -W512 -H512 -C7.5 -mk_lms -S3498014304 +- ## [Personalizing Text-to-Image Generation](docs/features/TEXTUAL_INVERSION.md) -# I wonder what it will look like if I bump up the steps and set facial enhancement to full strength? -dream> a cute child playing hopscotch -G1.0 -s100 -S -1 -reusing previous seed 3498014304 -[...] 
-outputs/img-samples/000040.3498014304.png: "a cute child playing hopscotch" -G1.0 -s100 -W512 -H512 -C7.5 -mk_lms -S3498014304 -``` +- ## [Simplified API for text to image generation](docs/features/OTHER.md#simplified-api) -## Weighted Prompts +## **Other Features** -You may weight different sections of the prompt to tell the sampler to attach different levels of -priority to them, by adding :(number) to the end of the section you wish to up- or downweight. -For example consider this prompt: +- ### [Creating Transparent Regions for Inpainting](docs/features/INPAINTING.md#creating-transparent-regions-for-inpainting) -``` - tabby cat:0.25 white duck:0.75 hybrid -``` - -This will tell the sampler to invest 25% of its effort on the tabby -cat aspect of the image and 75% on the white duck aspect -(surprisingly, this example actually works). The prompt weights can -use any combination of integers and floating point numbers, and they -do not need to add up to 1. - -## Personalizing Text-to-Image Generation - -You may personalize the generated images to provide your own styles or objects by training a new LDM checkpoint -and introducing a new vocabulary to the fixed model. - -To train, prepare a folder that contains images sized at 512x512 and execute the following: - - -WINDOWS: As the default backend is not available on Windows, if you're using that platform, set the environment variable `PL_TORCH_DISTRIBUTED_BACKEND=gloo` - -``` -(ldm) ~/stable-diffusion$ python3 ./main.py --base ./configs/stable-diffusion/v1-finetune.yaml \ - -t \ - --actual_resume ./models/ldm/stable-diffusion-v1/model.ckpt \ - -n my_cat \ - --gpus 0, \ - --data_root D:/textual-inversion/my_cat \ - --init_word 'cat' -``` - -During the training process, files will be created in /logs/[project][time][project]/ -where you can see the process. - -conditioning\* contains the training prompts -inputs, reconstruction the input images for the training epoch -samples, samples scaled for a sample of the prompt and one with the init word provided - -On a RTX3090, the process for SD will take ~1h @1.6 iterations/sec. - -Note: According to the associated paper, the optimal number of images -is 3-5. Your model may not converge if you use more images than that. - -Training will run indefinately, but you may wish to stop it before the -heat death of the universe, when you find a low loss epoch or around -~5000 iterations. - -Once the model is trained, specify the trained .pt file when starting -dream using - -``` -(ldm) ~/stable-diffusion$ python3 ./scripts/dream.py --embedding_path /path/to/embedding.pt --full_precision -``` - -Then, to utilize your subject at the dream prompt - -``` -dream> "a photo of *" -``` - -this also works with image2image - -``` -dream> "waterfall and rainbow in the style of *" --init_img=./init-images/crude_drawing.png --strength=0.5 -s100 -n4 -``` - -It's also possible to train multiple tokens (modify the placeholder string in configs/stable-diffusion/v1-finetune.yaml) and combine LDM checkpoints using: - -``` -(ldm) ~/stable-diffusion$ python3 ./scripts/merge_embeddings.py \ - --manager_ckpts /path/to/first/embedding.pt /path/to/second/embedding.pt [...] \ - --output_path /path/to/output/embedding.pt -``` - -Credit goes to @rinongal and the repository located at -https://github.com/rinongal/textual_inversion Please see the -repository and associated paper for details and limitations. 
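Before pointing `--embedding_path` at a freshly trained file, it can be worth confirming that the `.pt` actually loads. A minimal, generic sketch (not part of the repository; the internal layout of the file is defined by the textual-inversion code, so only the top-level structure is reported):

```python
import torch

def inspect_embedding(path: str) -> None:
    """Load an embedding checkpoint on CPU and summarize its top-level contents."""
    obj = torch.load(path, map_location="cpu")
    print(f"top-level type: {type(obj).__name__}")
    if isinstance(obj, dict):
        for key, value in obj.items():
            desc = tuple(value.shape) if torch.is_tensor(value) else type(value).__name__
            print(f"  {key}: {desc}")

inspect_embedding("/path/to/embedding.pt")
```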
+- ### [Preload Models](docs/features/OTHER.md#preload-models) # Latest Changes -- v1.13 (3 September 2022) +- v1.14 (11 September 2022) - - Support image variations (see [VARIATIONS](VARIATIONS.md) ([Kevin Gibbons](https://github.com/bakkot) and many contributors and reviewers) + - Memory optimizations for small-RAM cards. 512x512 now possible on 4 GB GPUs. + - Full support for Apple hardware with M1 or M2 chips. + - Add "seamless mode" for circular tiling of image. Generates beautiful effects. ([prixt](https://github.com/prixt)). + - Inpainting support. + - Improved web server GUI. + - Lots of code and documentation cleanups. + +- v1.13 (3 September 2022 + + - Support image variations (see [VARIATIONS](docs/features/VARIATIONS.md) ([Kevin Gibbons](https://github.com/bakkot) and many contributors and reviewers) - Supports a Google Colab notebook for a standalone server running on Google hardware [Arturo Mendivil](https://github.com/artmen1516) - WebUI supports GFPGAN/ESRGAN facial reconstruction and upscaling [Kevin Gibbons](https://github.com/bakkot) - WebUI supports incremental display of in-progress images during generation [Kevin Gibbons](https://github.com/bakkot) - A new configuration file scheme that allows new models (including upcoming stable-diffusion-v1.5) - to be added without altering the code. ([David Wager](https://github.com/maddavid12)) + to be added without altering the code. ([David Wager](https://github.com/maddavid12)) - Can specify --grid on dream.py command line as the default. - Miscellaneous internal bug and stability fixes. - Works on M1 Apple hardware. - Multiple bug fixes. -For older changelogs, please visit **[CHANGELOGS](CHANGELOG.md)**. - -# Installation - -There are separate installation walkthroughs for [Linux](#linux), [Windows](#windows) and [Macintosh](#Macintosh) - -## Linux - -1. You will need to install the following prerequisites if they are not already available. Use your - operating system's preferred installer - -- Python (version 3.8.5 recommended; higher may work) -- git - -2. Install the Python Anaconda environment manager. - -``` -~$ wget https://repo.anaconda.com/archive/Anaconda3-2022.05-Linux-x86_64.sh -~$ chmod +x Anaconda3-2022.05-Linux-x86_64.sh -~$ ./Anaconda3-2022.05-Linux-x86_64.sh -``` - -After installing anaconda, you should log out of your system and log back in. If the installation -worked, your command prompt will be prefixed by the name of the current anaconda environment, "(base)". - -3. Copy the stable-diffusion source code from GitHub: - -``` -(base) ~$ git clone https://github.com/lstein/stable-diffusion.git -``` - -This will create stable-diffusion folder where you will follow the rest of the steps. - -4. Enter the newly-created stable-diffusion folder. From this step forward make sure that you are working in the stable-diffusion directory! - -``` -(base) ~$ cd stable-diffusion -(base) ~/stable-diffusion$ -``` - -5. Use anaconda to copy necessary python packages, create a new python environment named "ldm", - and activate the environment. - -``` -(base) ~/stable-diffusion$ conda env create -f environment.yaml -(base) ~/stable-diffusion$ conda activate ldm -(ldm) ~/stable-diffusion$ -``` - -After these steps, your command prompt will be prefixed by "(ldm)" as shown above. - -6. 
Load a couple of small machine-learning models required by stable diffusion: - -``` -(ldm) ~/stable-diffusion$ python3 scripts/preload_models.py -``` - -Note that this step is necessary because I modified the original -just-in-time model loading scheme to allow the script to work on GPU -machines that are not internet connected. See [Workaround for machines with limited internet connectivity](#workaround-for-machines-with-limited-internet-connectivity) - -7. Now you need to install the weights for the stable diffusion model. - -For running with the released weights, you will first need to set up an acount with Hugging Face (https://huggingface.co). -Use your credentials to log in, and then point your browser at https://huggingface.co/CompVis/stable-diffusion-v-1-4-original. -You may be asked to sign a license agreement at this point. - -Click on "Files and versions" near the top of the page, and then click on the file named "sd-v1-4.ckpt". You'll be taken -to a page that prompts you to click the "download" link. Save the file somewhere safe on your local machine. - -Now run the following commands from within the stable-diffusion directory. This will create a symbolic -link from the stable-diffusion model.ckpt file, to the true location of the sd-v1-4.ckpt file. - -``` -(ldm) ~/stable-diffusion$ mkdir -p models/ldm/stable-diffusion-v1 -(ldm) ~/stable-diffusion$ ln -sf /path/to/sd-v1-4.ckpt models/ldm/stable-diffusion-v1/model.ckpt -``` - -8. Start generating images! - -``` -# for the pre-release weights use the -l or --liaon400m switch -(ldm) ~/stable-diffusion$ python3 scripts/dream.py -l - -# for the post-release weights do not use the switch -(ldm) ~/stable-diffusion$ python3 scripts/dream.py - -# for additional configuration switches and arguments, use -h or --help -(ldm) ~/stable-diffusion$ python3 scripts/dream.py -h -``` - -9. Subsequently, to relaunch the script, be sure to run "conda activate ldm" (step 5, second command), enter the "stable-diffusion" - directory, and then launch the dream script (step 8). If you forget to activate the ldm environment, the script will fail with multiple ModuleNotFound errors. - -### Updating to newer versions of the script - -This distribution is changing rapidly. If you used the "git clone" method (step 5) to download the stable-diffusion directory, then to update to the latest and greatest version, launch the Anaconda window, enter "stable-diffusion", and type: - -``` -(ldm) ~/stable-diffusion$ git pull -``` - -This will bring your local copy into sync with the remote one. - -## Windows - -### Notebook install (semi-automated) - -We have a -[Jupyter notebook](https://github.com/lstein/stable-diffusion/blob/main/Stable-Diffusion-local-Windows.ipynb) -with cell-by-cell installation steps. It will download the code in this repo as -one of the steps, so instead of cloning this repo, simply download the notebook -from the link above and load it up in VSCode (with the -appropriate extensions installed)/Jupyter/JupyterLab and start running the cells one-by-one. - -Note that you will need NVIDIA drivers, Python 3.10, and Git installed -beforehand - simplified -[step-by-step instructions](https://github.com/lstein/stable-diffusion/wiki/Easy-peasy-Windows-install) -are available in the wiki (you'll only need steps 1, 2, & 3 ). - -### Manual installs - -#### pip - -See -[Easy-peasy Windows install](https://github.com/lstein/stable-diffusion/wiki/Easy-peasy-Windows-install) -in the wiki - -#### Conda - -1. 
Install Anaconda3 (miniconda3 version) from here: https://docs.anaconda.com/anaconda/install/windows/ - -2. Install Git from here: https://git-scm.com/download/win - -3. Launch Anaconda from the Windows Start menu. This will bring up a command window. Type all the remaining commands in this window. - -4. Run the command: - -``` -git clone https://github.com/lstein/stable-diffusion.git -``` - -This will create stable-diffusion folder where you will follow the rest of the steps. - -5. Enter the newly-created stable-diffusion folder. From this step forward make sure that you are working in the stable-diffusion directory! - -``` -cd stable-diffusion -``` - -6. Run the following two commands: - -``` -conda env create -f environment.yaml (step 6a) -conda activate ldm (step 6b) -``` - -This will install all python requirements and activate the "ldm" environment which sets PATH and other environment variables properly. - -7. Run the command: - -``` -python scripts\preload_models.py -``` - -This installs several machine learning models that stable diffusion -requires. (Note that this step is required. I created it because some people -are using GPU systems that are behind a firewall and the models can't be -downloaded just-in-time) - -8. Now you need to install the weights for the big stable diffusion model. - -For running with the released weights, you will first need to set up -an acount with Hugging Face (https://huggingface.co). Use your -credentials to log in, and then point your browser at -https://huggingface.co/CompVis/stable-diffusion-v-1-4-original. You -may be asked to sign a license agreement at this point. - -Click on "Files and versions" near the top of the page, and then click -on the file named "sd-v1-4.ckpt". You'll be taken to a page that -prompts you to click the "download" link. Now save the file somewhere -safe on your local machine. The weight file is >4 GB in size, so -downloading may take a while. - -Now run the following commands from **within the stable-diffusion -directory** to copy the weights file to the right place: - -``` -mkdir -p models\ldm\stable-diffusion-v1 -copy C:\path\to\sd-v1-4.ckpt models\ldm\stable-diffusion-v1\model.ckpt -``` - -Please replace "C:\path\to\sd-v1.4.ckpt" with the correct path to wherever -you stashed this file. If you prefer not to copy or move the .ckpt file, -you may instead create a shortcut to it from within -"models\ldm\stable-diffusion-v1\". - -9. Start generating images! - -``` -# for the pre-release weights -python scripts\dream.py -l - -# for the post-release weights -python scripts\dream.py -``` - -10. Subsequently, to relaunch the script, first activate the Anaconda -command window (step 3), enter the stable-diffusion directory (step 5, -"cd \path\to\stable-diffusion"), run "conda activate ldm" (step 6b), -and then launch the dream script (step 9). - -**Note:** Tildebyte has written an alternative ["Easy peasy Windows -install"](https://github.com/lstein/stable-diffusion/wiki/Easy-peasy-Windows-install) -which uses the Windows Powershell and pew. If you are having trouble -with Anaconda on Windows, give this a try (or try it first!) - -### Updating to newer versions of the script - -This distribution is changing rapidly. If you used the "git clone" -method (step 5) to download the stable-diffusion directory, then to -update to the latest and greatest version, launch the Anaconda window, -enter "stable-diffusion", and type: - -``` -git pull -``` - -This will bring your local copy into sync with the remote one. 
- -## Macintosh - -See [README-Mac-MPS](README-Mac-MPS.md) for instructions. - -# Simplified API for text to image generation - -For programmers who wish to incorporate stable-diffusion into other -products, this repository includes a simplified API for text to image -generation, which lets you create images from a prompt in just three -lines of code: - -``` -from ldm.simplet2i import T2I -model = T2I() -outputs = model.txt2img("a unicorn in manhattan") -``` - -Outputs is a list of lists in the format [[filename1,seed1],[filename2,seed2]...] -Please see ldm/simplet2i.py for more information. A set of example scripts is -coming RSN. - -# Workaround for machines with limited internet connectivity - -My development machine is a GPU node in a high-performance compute -cluster which has no connection to the internet. During model -initialization, stable-diffusion tries to download the Bert tokenizer -and a file needed by the kornia library. This obviously didn't work -for me. - -To work around this, I have modified ldm/modules/encoders/modules.py -to look for locally cached Bert files rather than attempting to -download them. For this to work, you must run -"scripts/preload_models.py" once from an internet-connected machine -prior to running the code on an isolated one. This assumes that both -machines share a common network-mounted filesystem with a common -.cache directory. - -``` -(ldm) ~/stable-diffusion$ python3 ./scripts/preload_models.py -preloading bert tokenizer... -Downloading: 100%|██████████████████████████████████| 28.0/28.0 [00:00<00:00, 49.3kB/s] -Downloading: 100%|██████████████████████████████████| 226k/226k [00:00<00:00, 2.79MB/s] -Downloading: 100%|██████████████████████████████████| 455k/455k [00:00<00:00, 4.36MB/s] -Downloading: 100%|██████████████████████████████████| 570/570 [00:00<00:00, 477kB/s] -...success -preloading kornia requirements... -Downloading: "https://github.com/DagnyT/hardnet/raw/master/pretrained/train_liberty_with_aug/checkpoint_liberty_with_aug.pth" to /u/lstein/.cache/torch/hub/checkpoints/checkpoint_liberty_with_aug.pth -100%|███████████████████████████████████████████████| 5.10M/5.10M [00:00<00:00, 101MB/s] -...success -``` +For older changelogs, please visit **[CHANGELOGS](docs/CHANGELOG.md)**. # Troubleshooting -Here are a few common installation problems and their solutions. Often -these are caused by incomplete installations or crashes during the -install process. - -- PROBLEM: During "conda env create -f environment.yaml", conda - hangs indefinitely. - -- SOLUTION: Enter the stable-diffusion directory and completely - remove the "src" directory and all its contents. The safest way - to do this is to enter the stable-diffusion directory and - give the command "git clean -f". If this still doesn't fix - the problem, try "conda clean -all" and then restart at the - "conda env create" step. - ---- - -- PROBLEM: dream.py crashes with the complaint that it can't find - ldm.simplet2i.py. Or it complains that function is being passed - incorrect parameters. - -- SOLUTION: Reinstall the stable diffusion modules. Enter the - stable-diffusion directory and give the command "pip install -e ." - ---- - -- PROBLEM: dream.py dies, complaining of various missing modules, none - of which starts with "ldm". - -- SOLUTION: From within the stable-diffusion directory, run "conda env - update -f environment.yaml" This is also frequently the solution to - complaints about an unknown function in a module. 
- ---- - -- PROBLEM: There's a feature or bugfix in the Stable Diffusion GitHub - that you want to try out. - -- SOLUTION: If the fix/feature is on the "main" branch, enter the stable-diffusion - directory and do a "git pull". Usually this will be sufficient, but if - you start to see errors about missing or incorrect modules, use the - command "pip install -e ." and/or "conda env update -f environment.yaml" - (These commands won't break anything.) - -- If the feature/fix is on a branch (e.g. "foo-bugfix"), the recipe is similar, but - do a "git pull ". - -- If the feature/fix is in a pull request that has not yet been made - part of the main branch or a feature/bugfix branch, then from the page - for the desired pull request, look for the line at the top that reads - "xxxx wants to merge xx commits into lstein:main from YYYYYY". Copy - the URL in YYYY. It should have the format - https://github.com//stable-diffusion/tree/ - -- Then **go to the directory above stable-diffusion**, and rename the - directory to "stable-diffusion.lstein", "stable-diffusion.old", or - whatever. You can then git clone the branch that contains the - pull request: - -``` -git clone https://github.com//stable-diffusion/tree/ -``` - -You will need to go through the install procedure again, but it should -be fast because all the dependencies are already loaded. +Please check out our **[Q&A](docs/help/TROUBLESHOOT.md)** to get solutions for common installation problems and other issues. # Contributing -Anyone who wishes to contribute to this project, whether -documentation, features, bug fixes, code cleanup, testing, or code -reviews, is very much encouraged to do so. If you are unfamiliar with -how to contribute to GitHub projects, here is a [Getting Started -Guide](https://opensource.com/article/19/7/create-pull-request-github). +Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code cleanup, testing, or code reviews, is very much encouraged to do so. If you are unfamiliar with +how to contribute to GitHub projects, here is a [Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). -A full set of contribution guidelines, along with templates, are in -progress, but for now the most important thing is to **make your pull -request against the "development" branch**, and not against -"main". This will help keep public breakage to a minimum and will -allow you to propose more radical changes. +A full set of contribution guidelines, along with templates, are in progress, but for now the most important thing is to **make your pull request against the "development" branch**, and not against "main". This will help keep public breakage to a minimum and will allow you to propose more radical changes. + +## **Contributors** + +This fork is a combined effort of various people from across the world. [Check out the list of all these amazing people](docs/CONTRIBUTORS.md). We thank them for their time, hard work and effort. # Support @@ -783,22 +157,9 @@ For support, please use this repository's GitHub Issues tracking service. Feel free to send me an email if you use and like the script. -_Original Author:_ Lincoln D. 
Stein - -_Contributions by:_ -[Peter Kowalczyk](https://github.com/slix), [Henry Harrison](https://github.com/hwharrison), -[xraxra](https://github.com/xraxra), [bmaltais](https://github.com/bmaltais), [Sean McLellan](https://github.com/Oceanswave), -[nicolai256](https://github.com/nicolai256), [Benjamin Warner](https://github.com/warner-benjamin), -[tildebyte](https://github.com/tildebyte),[yunsaki](https://github.com/yunsaki), [James Reynolds][https://github.com/magnusviri], -[Tesseract Cat](https://github.com/TesseractCat), and many more! - -(If you have contributed and don't see your name on the list of -contributors, please let lstein know about the omission, or make a -pull request) - Original portions of the software are Copyright (c) 2020 Lincoln D. Stein (https://github.com/lstein) # Further Reading Please see the original README for more information on this software -and underlying algorithm, located in the file [README-CompViz.md](README-CompViz.md). +and underlying algorithm, located in the file [README-CompViz.md](docs/README-CompViz.md). diff --git a/configs/stable-diffusion/v1-finetune.yaml b/configs/stable-diffusion/v1-finetune.yaml index 5d608811de..7bc31168e7 100644 --- a/configs/stable-diffusion/v1-finetune.yaml +++ b/configs/stable-diffusion/v1-finetune.yaml @@ -105,5 +105,6 @@ lightning: trainer: benchmark: True - max_steps: 4000 + max_steps: 4000000 +# max_steps: 4000 \ No newline at end of file diff --git a/CHANGELOG.md b/docs/CHANGELOG.md similarity index 99% rename from CHANGELOG.md rename to docs/CHANGELOG.md index b9cd9d6d0d..31dc8e80db 100644 --- a/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -134,4 +134,4 @@ ## Links -- **[Read Me](readme.md)** +- **[Read Me](../readme.md)** diff --git a/docs/CONTRIBUTORS.md b/docs/CONTRIBUTORS.md new file mode 100644 index 0000000000..57a9d5cd38 --- /dev/null +++ b/docs/CONTRIBUTORS.md @@ -0,0 +1,61 @@ +# Contributors + +The list of all the amazing people who have contributed to the various features that you get to experience in this fork. + +We thank them for all of their time and hard work. + +_Original Author:_ + +- Lincoln D. 
Stein + +_Contributions by:_ + +- [Sean McLellan](https://github.com/Oceanswave) +- [Kevin Gibbons](https://github.com/bakkot) +- [Tesseract Cat](https://github.com/TesseractCat) +- [blessedcoolant](https://github.com/blessedcoolant) +- [David Ford](https://github.com/david-ford) +- [yunsaki](https://github.com/yunsaki) +- [James Reynolds](https://github.com/magnusviri) +- [David Wager](https://github.com/maddavid123) +- [Jason Toffaletti](https://github.com/toffaletti) +- [tildebyte](https://github.com/tildebyte) +- [Cragin Godley](https://github.com/cgodley) +- [BlueAmulet](https://github.com/BlueAmulet) +- [Benjamin Warner](https://github.com/warner-benjamin) +- [Cora Johnson-Roberson](https://github.com/corajr) +- [veprogames](https://github.com/veprogames) +- [JigenD](https://github.com/JigenD) +- [Niek van der Maas](https://github.com/Niek) +- [Henry van Megen](https://github.com/hvanmegen) +- [Håvard Gulldahl](https://github.com/havardgulldahl) +- [greentext2](https://github.com/greentext2) +- [Simon Vans-Colina](https://github.com/simonvc) +- [Gabriel Rotbart](https://github.com/gabrielrotbart) +- [Eric Khun](https://github.com/erickhun) +- [Brent Ozar](https://github.com/BrentOzar) +- [nderscore](https://github.com/nderscore) +- [Mikhail Tishin](https://github.com/tishin) +- [Tom Elovi Spruce](https://github.com/ilovecomputers) +- [spezialspezial](https://github.com/spezialspezial) +- [Yosuke Shinya](https://github.com/shinya7y) +- [Andy Pilate](https://github.com/Cubox) +- [Muhammad Usama](https://github.com/SMUsamaShah) +- [Arturo Mendivil](https://github.com/artmen1516) +- [Paul Sajna](https://github.com/sajattack) +- [Samuel Husso](https://github.com/shusso) +- [nicolai256](https://github.com/nicolai256) + +_Original CompVis Authors:_ + +- [Robin Rombach](https://github.com/rromb) +- [Patrick von Platen](https://github.com/patrickvonplaten) +- [ablattmann](https://github.com/ablattmann) +- [Patrick Esser](https://github.com/pesser) +- [owenvincent](https://github.com/owenvincent) +- [apolinario](https://github.com/apolinario) +- [Charles Packer](https://github.com/cpacker) + +--- + +_If you have contributed and don't see your name on the list of contributors, please let one of the collaborators know about the omission, or feel free to make a pull request._ diff --git a/README-CompViz.md b/docs/README-CompViz.md similarity index 83% rename from README-CompViz.md rename to docs/README-CompViz.md index b2f75bbaf1..ed7df6a4ea 100644 --- a/README-CompViz.md +++ b/docs/README-CompViz.md @@ -1,5 +1,6 @@ # Original README from CompViz/stable-diffusion -*Stable Diffusion was made possible thanks to a collaboration with [Stability AI](https://stability.ai/) and [Runway](https://runwayml.com/) and builds upon our previous work:* + +_Stable Diffusion was made possible thanks to a collaboration with [Stability AI](https://stability.ai/) and [Runway](https://runwayml.com/) and builds upon our previous work:_ [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://ommer-lab.com/research/latent-diffusion-models/)
[Robin Rombach](https://github.com/rromb)\*, @@ -12,16 +13,15 @@ which is available on [GitHub](https://github.com/CompVis/latent-diffusion). PDF at [arXiv](https://arxiv.org/abs/2112.10752). Please also visit our [Project page](https://ommer-lab.com/research/latent-diffusion-models/). -![txt2img-stable2](assets/stable-samples/txt2img/merged-0006.png) +![txt2img-stable2](../assets/stable-samples/txt2img/merged-0006.png) [Stable Diffusion](#stable-diffusion-v1) is a latent text-to-image diffusion model. -Thanks to a generous compute donation from [Stability AI](https://stability.ai/) and support from [LAION](https://laion.ai/), we were able to train a Latent Diffusion Model on 512x512 images from a subset of the [LAION-5B](https://laion.ai/blog/laion-5b/) database. -Similar to Google's [Imagen](https://arxiv.org/abs/2205.11487), +Thanks to a generous compute donation from [Stability AI](https://stability.ai/) and support from [LAION](https://laion.ai/), we were able to train a Latent Diffusion Model on 512x512 images from a subset of the [LAION-5B](https://laion.ai/blog/laion-5b/) database. +Similar to Google's [Imagen](https://arxiv.org/abs/2205.11487), this model uses a frozen CLIP ViT-L/14 text encoder to condition the model on text prompts. With its 860M UNet and 123M text encoder, the model is relatively lightweight and runs on a GPU with at least 10GB VRAM. See [this section](#stable-diffusion-v1) below and the [model card](https://huggingface.co/CompVis/stable-diffusion). - ## Requirements A suitable [conda](https://conda.io/) environment named `ldm` can be created @@ -44,16 +44,16 @@ pip install -e . Stable Diffusion v1 refers to a specific configuration of the model architecture that uses a downsampling-factor 8 autoencoder with an 860M UNet -and CLIP ViT-L/14 text encoder for the diffusion model. The model was pretrained on 256x256 images and +and CLIP ViT-L/14 text encoder for the diffusion model. The model was pretrained on 256x256 images and then finetuned on 512x512 images. -*Note: Stable Diffusion v1 is a general text-to-image diffusion model and therefore mirrors biases and (mis-)conceptions that are present -in its training data. +\*Note: Stable Diffusion v1 is a general text-to-image diffusion model and therefore mirrors biases and (mis-)conceptions that are present +in its training data. Details on the training procedure and data, as well as the intended use of the model can be found in the corresponding [model card](https://huggingface.co/CompVis/stable-diffusion). Research into the safe deployment of general text-to-image models is an ongoing effort. To prevent misuse and harm, we currently provide access to the checkpoints only for [academic research purposes upon request](https://stability.ai/academia-access-form). -**This is an experiment in safe and community-driven publication of a capable and general text-to-image model. We are working on a public release with a more permissive license that also incorporates ethical considerations.*** +**This is an experiment in safe and community-driven publication of a capable and general text-to-image model. 
We are working on a public release with a more permissive license that also incorporates ethical considerations.\*** -[Request access to Stable Diffusion v1 checkpoints for academic research](https://stability.ai/academia-access-form) +[Request access to Stable Diffusion v1 checkpoints for academic research](https://stability.ai/academia-access-form) ### Weights @@ -64,36 +64,37 @@ which were trained as follows, 194k steps at resolution `512x512` on [laion-high-resolution](https://huggingface.co/datasets/laion/laion-high-resolution) (170M examples from LAION-5B with resolution `>= 1024x1024`). - `sd-v1-2.ckpt`: Resumed from `sd-v1-1.ckpt`. 515k steps at resolution `512x512` on "laion-improved-aesthetics" (a subset of laion2B-en, -filtered to images with an original size `>= 512x512`, estimated aesthetics score `> 5.0`, and an estimated watermark probability `< 0.5`. The watermark estimate is from the LAION-5B metadata, the aesthetics score is estimated using an [improved aesthetics estimator](https://github.com/christophschuhmann/improved-aesthetic-predictor)). + filtered to images with an original size `>= 512x512`, estimated aesthetics score `> 5.0`, and an estimated watermark probability `< 0.5`. The watermark estimate is from the LAION-5B metadata, the aesthetics score is estimated using an [improved aesthetics estimator](https://github.com/christophschuhmann/improved-aesthetic-predictor)). - `sd-v1-3.ckpt`: Resumed from `sd-v1-2.ckpt`. 195k steps at resolution `512x512` on "laion-improved-aesthetics" and 10\% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598). Evaluations with different classifier-free guidance scales (1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0) and 50 PLMS sampling steps show the relative improvements of the checkpoints: -![sd evaluation results](assets/v1-variants-scores.jpg) - - +![sd evaluation results](../assets/v1-variants-scores.jpg) ### Text-to-Image with Stable Diffusion -![txt2img-stable2](assets/stable-samples/txt2img/merged-0005.png) -![txt2img-stable2](assets/stable-samples/txt2img/merged-0007.png) + +![txt2img-stable2](../assets/stable-samples/txt2img/merged-0005.png) +![txt2img-stable2](../assets/stable-samples/txt2img/merged-0007.png) Stable Diffusion is a latent diffusion model conditioned on the (non-pooled) text embeddings of a CLIP ViT-L/14 text encoder. - #### Sampling Script After [obtaining the weights](#weights), link them + ``` mkdir -p models/ldm/stable-diffusion-v1/ -ln -s models/ldm/stable-diffusion-v1/model.ckpt -``` -and sample with -``` -python scripts/txt2img.py --prompt "a photograph of an astronaut riding a horse" --plms +ln -s models/ldm/stable-diffusion-v1/model.ckpt ``` -By default, this uses a guidance scale of `--scale 7.5`, [Katherine Crowson's implementation](https://github.com/CompVis/latent-diffusion/pull/51) of the [PLMS](https://arxiv.org/abs/2202.09778) sampler, +and sample with + +``` +python scripts/txt2img.py --prompt "a photograph of an astronaut riding a horse" --plms +``` + +By default, this uses a guidance scale of `--scale 7.5`, [Katherine Crowson's implementation](https://github.com/CompVis/latent-diffusion/pull/51) of the [PLMS](https://arxiv.org/abs/2202.09778) sampler, and renders images of size 512x512 (which it was trained on) in 50 steps. All supported arguments are listed below (type `python scripts/txt2img.py --help`). 
```commandline @@ -131,73 +132,72 @@ optional arguments: evaluate at this precision ``` -Note: The inference config for all v1 versions is designed to be used with EMA-only checkpoints. + +Note: The inference config for all v1 versions is designed to be used with EMA-only checkpoints. For this reason `use_ema=False` is set in the configuration, otherwise the code will try to switch from non-EMA to EMA weights. If you want to examine the effect of EMA vs no EMA, we provide "full" checkpoints which contain both types of weights. For these, `use_ema=False` will load and use the non-EMA weights. - #### Diffusers Integration Another way to download and sample Stable Diffusion is by using the [diffusers library](https://github.com/huggingface/diffusers/tree/main#new--stable-diffusion-is-now-fully-compatible-with-diffusers) + ```py # make sure you're logged in with `huggingface-cli login` from torch import autocast from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler pipe = StableDiffusionPipeline.from_pretrained( - "CompVis/stable-diffusion-v1-3-diffusers", + "CompVis/stable-diffusion-v1-3-diffusers", use_auth_token=True ) prompt = "a photo of an astronaut riding a horse on mars" with autocast("cuda"): - image = pipe(prompt)["sample"][0] - + image = pipe(prompt)["sample"][0] + image.save("astronaut_rides_horse.png") ``` - - ### Image Modification with Stable Diffusion -By using a diffusion-denoising mechanism as first proposed by [SDEdit](https://arxiv.org/abs/2108.01073), the model can be used for different -tasks such as text-guided image-to-image translation and upscaling. Similar to the txt2img sampling script, -we provide a script to perform image modification with Stable Diffusion. +By using a diffusion-denoising mechanism as first proposed by [SDEdit](https://arxiv.org/abs/2108.01073), the model can be used for different +tasks such as text-guided image-to-image translation and upscaling. Similar to the txt2img sampling script, +we provide a script to perform image modification with Stable Diffusion. The following describes an example where a rough sketch made in [Pinta](https://www.pinta-project.com/) is converted into a detailed artwork. + ``` python scripts/img2img.py --prompt "A fantasy landscape, trending on artstation" --init-img --strength 0.8 ``` -Here, strength is a value between 0.0 and 1.0, that controls the amount of noise that is added to the input image. + +Here, strength is a value between 0.0 and 1.0, that controls the amount of noise that is added to the input image. Values that approach 1.0 allow for lots of variations but will also produce images that are not semantically consistent with the input. See the following example. **Input** -![sketch-in](assets/stable-samples/img2img/sketch-mountains-input.jpg) +![sketch-in](../assets/stable-samples/img2img/sketch-mountains-input.jpg) **Outputs** -![out3](assets/stable-samples/img2img/mountains-3.png) -![out2](assets/stable-samples/img2img/mountains-2.png) +![out3](../assets/stable-samples/img2img/mountains-3.png) +![out2](../assets/stable-samples/img2img/mountains-2.png) This procedure can, for example, also be used to upscale samples from the base model. - -## Comments +## Comments - Our codebase for the diffusion models builds heavily on [OpenAI's ADM codebase](https://github.com/openai/guided-diffusion) -and [https://github.com/lucidrains/denoising-diffusion-pytorch](https://github.com/lucidrains/denoising-diffusion-pytorch). -Thanks for open-sourcing! 
- -- The implementation of the transformer encoder is from [x-transformers](https://github.com/lucidrains/x-transformers) by [lucidrains](https://github.com/lucidrains?tab=repositories). + and [https://github.com/lucidrains/denoising-diffusion-pytorch](https://github.com/lucidrains/denoising-diffusion-pytorch). + Thanks for open-sourcing! +- The implementation of the transformer encoder is from [x-transformers](https://github.com/lucidrains/x-transformers) by [lucidrains](https://github.com/lucidrains?tab=repositories). ## BibTeX ``` @misc{rombach2021highresolution, - title={High-Resolution Image Synthesis with Latent Diffusion Models}, + title={High-Resolution Image Synthesis with Latent Diffusion Models}, author={Robin Rombach and Andreas Blattmann and Dominik Lorenz and Patrick Esser and Björn Ommer}, year={2021}, eprint={2112.10752}, @@ -206,5 +206,3 @@ Thanks for open-sourcing! } ``` - - diff --git a/static/colab_notebook.png b/docs/assets/colab_notebook.png similarity index 100% rename from static/colab_notebook.png rename to docs/assets/colab_notebook.png diff --git a/static/dream-py-demo.png b/docs/assets/dream-py-demo.png similarity index 100% rename from static/dream-py-demo.png rename to docs/assets/dream-py-demo.png diff --git a/static/dream_web_server.png b/docs/assets/dream_web_server.png similarity index 100% rename from static/dream_web_server.png rename to docs/assets/dream_web_server.png diff --git a/docs/assets/logo.png b/docs/assets/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..fa0548ff78c7171d850ee6451b6a36f1559f5f3e GIT binary patch literal 22220 zcmc$_WmKF&(w~M)@ zJ;a070%Bw9Bu;hc-_wSD932GRCa)3Nlmw-m9Wmz1Ct^A-gJ zI6^$kX}ujCoZLmd#p(YcR}^^t{4qB@?LV4$*o)IkKMzO?R@I<=?d%4j72@LMwB+I8 zp%oJ0f>@dJS@2tP(DL!{@pAJ5KOjzCK2d%iQ4k;Pzy8q!!?{^mi)zWr|7$Sdoj5(z z!^1_Co7>CFi_43j%h}C_n^#0cgqw$tn~#qZXu;|3K29DH;75l?M3fiMnh-mnJ|3QbHdO@-!OGmj{J+=O z*~-@1=fBfbRaI2c$=$=;$r7R{D^3qgip$p4N>l(MA|z-bV!;XG;}rz{fOt7AM0l+@ zg$4M{t*nJbtOTqC{xL70zpS&R=kxYG_y4EeS~*(+GXAqeqUM4S9&-ypOHN@SAxlnv zOOO?(1;`4-DIx^2;^!0K1pzPqElu6c7C1WQ4*#pv=UG_+68S9!tU=ZSe4IiemX@4W zf*^iQ5fMv4PHSFaAwGz(xw(jdFg>l6rKq*Do1-~kC$^5}HV|$XCmVX&|2do2&JNCQ zs?Juxp7TFjGawyU#T#39Ky^OnAj zZL1~J+{p$4cusEm=f!Y8yO@6}h5LWk*FV1dU)<5Zn*%=X`Q?ASEAZw&{u$x~h;{?K zugXS?KOCG!v7)TBj`x>?uRiHax;{^m-&32f+SJs&MkKHhyQ_ZDV+`aLG3K(YwOLr; z3oze~_b?*Rr$gz}NmXS#k#vaR;o%_)v}K4`-F$a#=e~EBi)kh9fk6|ksv|1uS1RgW z%YNTSwta3xb_|U@OPnSfmd>)gUg#U$(n8FrUHDuGtKguhTL?o^#v&;BqtISPcr3W) zU}`h=Dr#@)8R7A;YU{tkfh!Ix0p2p6VzkzOXUOBMPbc80I$SONFENfOT}Y1cvKm#{ z$2q|^2-)yU5jJwRpbUohma&rA@VOli9o)f^D%0?BwsS|Yj`0r5j_;!u1*4QZAOsqe zK-@AEi9yicL7k&xdNKYJ=9LN7r3U%DVw`xy!1(R;+uu5e-X%zTq+*cPg6k*pIQd$* zMx1K~mpPq%7dE6lnv=4UvqCdd^0=*E>sTwSD}L{r^SWP@5wDo9#B@`I%jbP2x|cK~ zs&c7$m0o7nitUXpY+r*q1qIb65`^f7T&Zs*ykr2=AX39HPbZ+DjMq$X3poBa8c)X> z$a~zu6u1|(z@Ypnl1Ps`8jmX)NPCza3Anbx&mT%GqqKQeQP7l`1t4Ab=SSv^LP6{9 zBFEFHwX2V7L4$;JMPFdC^FvC@jh%s6f}fYtBCcODF{BQ#8SUbmHWJ zuFNZ|ClZwERgXB%qD6otewf9|GVb|m2vRBL_2CaJ1)xpuwz9Xt+2y)asOR7hY`N!N zTlLj}BB^FyM+hN$_8{^hMgz3_*|v5{Q~j_<4FVToexpqoEhf9(5XV^4bPJ)`sqbTaowJ`6URSJEVUS>ju7F;uU<} zAvbs8EJ=IWs-=)Lq33y~M0V``5}*guur2>n+7ha>N1#Nj|N0*&2vr6x#GI5gG6WAr zrsYLyezJ^GbTXF`YqA!Ea_5ITxp$}ls_@SiOF#}>q3zn)&B+}i(0ZNl*Y?@QnF!^} z_sLGK$|1HHGxe&pcs=!wD&t+Qlx|HN5i-=Q5Q7m&j4iTS&9QG}G`b+5)bU^~jKLbT z_Y6RDwiF8}^kZj8AaEpj#3&iQs(l*V1jII0V6uecfm0(K@6b@f!ew5NQ)ECL?Y~!7 zf4;p%xqpP4b%^v3jYxLm-YZiQ8A6C>yJ7)l%tmU(NYTDgRps3ANGvxLsWeRs4Z z4HA9BNA@yp(2^9T(AFZu+Vw5V@jhlmG6t)$(KIIp8B&JOg_H8p`W%89F7?U~4|_X|AM`rTmU?XRQK8iR^3_ 
z;ZWNQ?}`LB)1V_UwN*3qgz95+qAN}^$*XN*4U}AET*Lg`MUSvx+mCINyAb_D7ob10u(9LGq z*0?r?dW5(npsk>zWMLq9ae;k)aZ%-SCZDKygi_vx3Dy&PSxQfc!5{P}Bf9mDmooO# z$aZf_D$@%n6iT0>RAt`%HZn3&z7TU0KPs_Xm=tYna&f5T5SdY1k?-^G7w+oGhj+cb z*14nbCcZm9dX*-y!^P&~8-H1P0dT6oX8f59ojUys1zK<;6#eBF7rs8~X8gfZUJwZE z$Q`-O@y)U@zA(9~{$WCvUhLtV;Qr)9AqpEi&RoGV1og1syNyrelgu=6`fY7 zB5QG_a}P*0%V$e5%m36I*iJ=8t=IPU6dgaxPiN~B3JZzzlO1MjqA&KQPuF5cPU;)J z4r(e;y%v*D5&c?{YQF%5=j8YNF&1|5ivjnzd3jOui@ng$#A*UK94}vsHPCOnoH5aL z>ERHQg%Bf8FS$j?Ut1fqvh9>l=<(DyoQLgj4wLSeb*-4oQ$UZ!YyTBQn$DvuZ5F}- z@gIsg$z34Tw}$gGb@r%N%TKRN0z9!pq=GLm<3BlCTTo^}7NfLc;xead9p|mpL^e6U z1YG)n1nThOa8_Dt+2)`{bu5uB>OxAalJsTvz+l)f~( z2Jwk2wK;7l^q+Vn+7tSy{h}@}ahA>twaF+cNvpre|?$Bh6_G zHee*#3QBT3q_Sv=N8fVu)c!R7k<#6c{oz0=h=t2mf2q6$3acbdI_tt5aXTOFGcypZ z*n?Zz9i^f1i^|E7o|_Zp!B!mkmfQPM*!nX(4&$gfjaDD2;GEw`t(i!acH}7v>@?10pYIZ%??qY|0;3#OkI^QXC&|M$H~c&`NA8 zvUsEnGksA0SfGp@m3A~oQBO+{&fcEAzH7Sm7b<}Ez8f%{{n^^gyab7QJ0xYLw7H)s zSagjyg-?)4%#lA66QRpTl|@p+iBLhIhL3N0nl@7@-Y^riJRBS`5>wD;kLX!kht)7I zS=#}p$cXFx-cPwC(lN5@^X!=)S#}-vToZcvCHh=innB6^rp8w84~xi@(`Sf^JBcIP zysAf${8(W!ot>V>9D{cQ?=4~~{U@w%ZipRN&g%4I-ZXSB2Tp9wy~40M!)PD5E6PY+ zE=Zo3n6N~w0AF*J`rBefmhdLIU2h>NARU zoL>mIIv_mtSvna$BO@m-d82IKi&RQjzfwTqJx+)vEr^Q6qJuI(gQ*h`1uw@p^a~N^ z1BRxC-$TWDE{moMAr{M8HVm-kY!o|mToqqL`MZEft0dxf%@}-9Nr@h%{1MH=? z&PP&cu7B1^x(G^S-;T~7J*cXdf6JR(`LT{QXNYBoV20+x)T0&S=tpSyuIU_$#f>Z) zf(d+s&^G2N)>wV_08dUqaq~cP*b7n}7P(r)!a85qOIHWxx+f)k*fS69?Q=Ef8}Z;B z%;Upb@_BUv>u=+9%g4;;K z>U66ci`WtWKO?Babzqkd=+f}E$gId@n-5QI*Wc``rKgpWY9&|J?ZaZWO+2nA#gUy_ z#781Ixu_0MR#$qubd^ZlDBr(2YKZ{cCougYQAX_u#m zD?&usm+@dBtdU0Jb{LEsNk5RY$GyFH)&2?3tP>yco$>?l2vT!Y&rU|V1D2fxhta+bTWA$u*j5}bIKT^|kb!0OCw zK}oW#a>A5!U)5Yk#M_Z+NwHL~T@0Cfv|~8ujk(17M$Wt@kB0_B!|6NrxJoKXZ-Sz< zEg`{d!E;>Zd4su|%+n?mTb&g4;3?ahz}VR2EL;aMCHfYd5w64bCu*4-o=9Hwgh$Xf zWaYfij528SpUA2G_Tir|lh?1;!%IU_(Z$0O+X7*dPpVP#fmAu})eTA&k%R*)#|#V+ zHm$|RSuU*y{;|4O6UU#tZf9${nG7{lK;21NV+#udCAuRuz87p685wN!9IpY*@uMee zO-u^`*Y6&1EhfQ!q@6tV60pmO;k4gKr#t%#p@res8LP!<9K>pV5p3f+neM7|iaDN4 zK%H$eOdR6Pff}U7K@7w$XApbVyOF{=S*cfY@}B38RDk!@Cd(($h)aPM&#Z+>e>}L; zTu!XwdNC;cro;5sygXKkCEcu^=~$*k4otg?oGVSci$u?ex7GOmqDtyOi-+ZAn&$oW zojUStQ(wVKk!!$PCB@x6ikY?mcJ8)oO2FSj8fpf(z?1di#?dO5;+?+@HqD*^s zi$ABq&6h;v6n)uT=i+F#*h!E!){REXF z-zn+5m97l|JJ4r@dmZ3_u*Tho$Jv{9-_vp3XiLt&8wmTk1UU2Op@Ecgd_&go%kp6r}y`ri_nq<$WZ12&K6?pSp9Gwe5= zB`>L{eR@~>(D|29KUgx=lE^7vGfqG9 z{NCkE+%|Csi;f7JiREj>yvdJTZ1y_cEiDyU|KR@1iVuRx{FS#eQOlfk>&>VLT?Y>l zuB7QoYuZdzNASK%+!k6VN@yr*mM04#VI51;%RXB}%GBbR@rxnz^_*phB>qQP%Krp?!z0*GrxT)PZw!nV-xs?79)=FZFKn!w*_xzME<- zxY%SBGIydbudW|GiFLFPVD|{_Koa)-CyHk573VLjGb99Qa*E7fo9r)73|J$Uk8IX^>ll;J zfmo~;+w#{I(Le~zlOtY+bZa17b_$|J*45bKSlamg-(JdZf4^9+I1BXMJMH2{St!9pY@UL+V@JxpOi(*+C4{m z^YxC_vo)5{uv`L@(jBv2q(N7ViRtND|C{PE!z*$vHviOb6)1r>Qt$@UofUytf9-_O zZaY&#z5UaHh!|5h9+Lxo{Vh;(GFG4#`sh zL7yCG*m5)ETu@t^nUixWMrmn2FJaOB+F`D-4LsuLuW+}un5h0(oUOgI9j1C10<&wA zwn$i0m&n{m;3jVQdmbFZ-WT*KS)vpjot!v*pG?F5ktV67es~l0iSpctH|#DpUyIh4 zY5Cee*mObP>?&Bc`ox3!aRI3!}Jrvq6hDtF!p}LB3XTYkhRzdxIJ)GMg_xf>%p^FF83K z0mX^FWlYh6jJ?8@GhL>ho8NYsGB~2m9?{oXJkenJmCBj9L-Ut`lyVvV>o9AmbXd@J zK8=w;ebOTGC>;wH>+r2hqV66BN7j+St3$ewP-&rHUD6S3>Lbt2RBT;7nz!!8G&w3I%nFbm)9dm$Y6 z5G3Fy;I5zD#LlNY&86KQa)PU(uG*jI0jC-zkIK%RfZb95aDI8noTQe(4wmVtFoE4I zIn|yI5$~to#ik%=BBqBXJfytTQ{Kva&!Lm&%7o+}T7i>9vy{A99&?+4*iMQN5z#()2H0;uo)m(cH5Prc!m^ zUQ{N45mQcy%ZZi7Kbg_~>Vhc?2W~2|6p+SaAEYU1mZaev$9uJ8%Ec1bS`XP-R2K<} zv#i%~e|8Zd_G3aTjd-U33})z{m%v*ikh zGflO9i-n6@Yd@_<%*m$yYMiWc%8V2R$fn0Z2GwicdS${{D$EC;ABgw_iivfqt#xSe zI(SspPJ|<59)o>MBh5c};2rK}jD#Y`_FM76tcf<8Z%xXzrD<3CY8>#Rg^P7+a(gg3 
z>RYR^aW2=d?)nx5Kk5D60|`HYFmvsG9tC9-)XSS?pqh5u(dUP6iAsam zZklPVuMaZm(P)YS7E&Ag4+C1zg)U=I>urC2R2qj*;UA9N=OP*1Rdex8gUr z59w%}|LFq^mGK7->*%Qo4dE4IyH9@b2zQ|sn=o-8MSkA(=4<4(JGDVSk^N>(nbG== zjc_LBD{*5|{i+uG6@eyfq)`6AC!HmGmAqmNm2t*TQAyd~rkZqu>4bgTgal}`W67~h z4A)QZ-=*!#c@#t(&;Rt`JvG2^&D9uQ2k(lR$3-xDvx?5>c2#ec<@xua2L?FgOf zjqWoyV4ihI)nSW<#7S~y;rsw%NYrSpl9>qKd(?SXqE*@nx$x;DR}Lh=-7>0IrdTrG%Zm;H=-!-F^y!7-PBQi8QWD?)k|moA)l?To+QJ4RP&hA8 zKDW?=;`#4g@Vs%Z)`<;&cu<%mbXNg#dFLH$-^jTPNiV=hV!O(WY3q*wqzrpUNP zrdPai%D>JZNlOdrz1fB#q28ug-fc&=E02 zV9m=8(#`M`ix04ITr8F;2M=@=4ft7CN&ve-kC1_|t_kIZG8N?4*T?%y^c^%TcMfv5 zN8zCawgn&fW(g~=_}cgNk5KHOxCg@dzy^$n8_!0N2};vIo#|Fy$>eS}Po zhCklm15e2dO{lHyX0o>0c7Bo?kQpTnQP0>VYvE&|pht8)#~2iXWekFZFxN%J@2ziG zt2r|xd*xGU0yr>FT!xMEnnAwMSV>Bt&Rz#XSpWy zeZWW^*?x`I2`uopGIx1yUY>iUR-3l!4&7X>jnLDbU71hR2xy^>_ZHJYkzuJ z6uarM6t0LJ*=A>!SHJYd%=hTubQ8PV*Z$%+?9+1bp*E`Ty+WdSf)xg`dKLXAw06rV!dUbF_Lb^T2NG4x*h$JdNV~| zZ+vz(&K3g$8Foo6!bzks{}%b;g){(4`d#jCb)p81z(|4+xCp+uUnS<|=8kXfwJ$C3x&YkiWd2o9m~Fi`Fj-NyL3-h5RC%?+qC;6$5Nh9szi|+hMf+JV98o(-ANsk6UI#}6J}Bj4-FzgqG6T)#9LW#JgK zI9zbjEBCd|RXssQYLGBmQO?Gdih9T_OYhL@pO@HJKh>4yda#Z3`N~``V%@SMZTXo8 zPkW=rL0R8~Q&%~qY;1j|@`Fb-1BLboB)7!1zJB(@U>R%to2sqj$ zrGkREUyrinRC(1pFLT}uC_LP+^hy6>PcLDuMhBrUomnQ~`ZI)$Tq5x&De{8|A&>VF z`C3ayiTy|bK*>mo<;&!}7SqIuqW{zmcSeGyBhuWzA^`oB9(rW-b@Q-~YA!RUurPJ0 zjr!f;LT9d=vLd5=8sf21Z_DK_Ib~nx(OHijZK3ycrV25Yqp$M(`&%hA4fxKCG$Xxo zFI%Tz4Hg;aBWe@VJ2vCRGn)`IX~E2b^75~5@>4_C&1h)h!SGZ=C<&5B$9<@nUv6H| zl+Bxhbl_#%Hw@EIXKNHrW1HPQhI!F$Vfh0?U}6AGTXXFn;lg#Fg4J(BX-cU{q-2km zy|$uZjIrC9>r?9kR%0GsLwUu;KKY~@D*Sl@i^@5exunQt`74eu-d za{Hp_mqhzj7c7^SoP1a`rwLr<88UQ|TFX${lI!ov4#TZIsPpXK&SMa^$oz$lw!PGa zH!#gIQM8n5X0ZyJuH^VtldSlppQI#1q<{;q`IgZJ$k>M|x=fmq2$S$R?4%rZTW;1_ zO=dZ4?US$lB>-eiffz3S{NsBS!PkcKlt9|?TuIs8-NkJzhxibm7EIgtY={fyqEy!3 zLNTXVE-`+r+&*Dwn>xD;VUyiUOM5kkeEEhB`{fjFlTyCCPmgzhdiO%&)`!L+#bmTC zAe$52gB(k2{kS{R4J1VNVZ7WSf6mTkT8#cvMokl(^~C$l2(BohETV3Eb=l(9X~Lzw zt_y@!dDKmxzU80@7gGn!C`BF9n*cCe*GFM3O$wB0Q!B~Fj=O7s3i$Oa~VyC^t! 
z|I^_(S!VG>=RuI^=m1H&u$(#bi;shT*eD$C*|@!PE!P&Lj`9V31DNE5AMTi`yhxcp zauj~~K(0`1x8op%1{BJyPS)~l?R-NW1jCktRxAP|gEPoG(<+BgD{38giOGsM-F};d zu$AE1rl|*XjzriP7zJ2n83tuXtx5 z+$*>`p!wDeQpTcwl*`&X{5?9!`?#Br7b+_@Tmu>0+09Td`Jnv-0I$~8=>sG{Fbiej)NQNY&kC8Y~`_bW>yf^Yq z4F(FMo$+XsL>736pBCrn#y=*cXiyVV6=OY|5S=ax771>2K_5&o={!ExFetY*E|<;| zkqyUV#P9x{-Tsk?J^m$etlGjo;@@4K>s3yJihExaQLG+sbAE(mxD=hD1N{0%9-Z?J z3MpynY+1*5n$1jXBiHKg)y*Ig;U`PQET74}5aBFvh>bB32C|yy4k$E-=f?l$UjHB9 zET~u~W?b!67=TOTNB4w#EDs%Y(XE} z-XP0_2+z^-fAhY(!(X`5$&x!`J`919qJ)J#a7f;Q<6j*jMMe%Rzts8%HY>)?J`8A8 z4e3PNH_kFRgP4!|ew1~*1tY0~ysbxnUU+hvV z1Xi#`x{j8L7wW+bRKPb5Y+4EEK3MqtJ0(^I zHX#opjn8mi5hgeq4BOVF-2~jC(cpO)BC2A-t13m-_cRR)&nImoG)B}0-6!LQ`5`7fCNJC?Rf`@z_y{; z??7Tq9tHQY(-55lF8;LTBod+2I~xW08mWiqm5}n`<1$Bko6yMmh%hNJN0xIgA%WAI@m?~p!;d2>RXv_g)HKV z;E#(KPbYg~)!`Ou?mN0nKnXn0iBSL_TjG=*zS8OPwjmnQJFzLGMhp)kcWCT0r$uqk zm}URz%b{>YLG@K3fEfvd`39}WD%65#Nz3=_(K&G16IW3z0fg0UX7(+ch+S<({K$s} zsCw`h&en{y3JMmOtuURbjv2rluQnSWu6$BCc}3QdV#n2a4CZYAj9UFMVn=(A1ja~t zf%Tmbu{-AAAyHpQ`LS_e^>Z54yZuD4;O`?)6-Vz7?!uLr67kx5iE~AA@{GSnN9Cl| zp=5+*MqJ83KzeZC?tQZfRPZoIXHgPb|DM*zYJuv>S#0b|1kf;RSZC(gZJmkJBWY9A z2%=@7&e{BljXT0PpIpL$sIddM=0S>$hh!=&(kQT&|wAYQl1=rCp|Js3g1%Lx7s7j$LB^hyV(=#QyPhELXW| z)|yDXKHv2ASqf)ZJHCbz9}poMF8dM#XCIZOB`q?tFbtpbIDGJkfFsLh?ajBhmc=?!>}K6(Zsn-pSYY-b_gY;3A|j__q`-{#}2!s`&?A%2h||D2H|MA#1Va zj!F-|I`W^J+XBvV{D*!Tv?`AszURWel458eA|0W=v8e&BsdnDCI>nX7to}d%2_Ksj zNbvUet~@ZBX40W41b|w7Q$JX#&Kz9x9n;aROh!FUckF|RY$rZKe(=|3#(HdQPlRbA zVYQIqVJC;X73$v%M07|%inzLJzI0J>m7BZ~f+{x3ri&m#8bc-wnnEYv>-jaoo1}^L z(nyeyum+lP;}9}(DJp2VwZwdu<4+W6jvp)MYWzMSVBZ}mmLs&b`r)kZ+r*Pyn8Pey zQ+iJDe(_qmS@4{8oS2x>Bq+{dwvvL1I=z!-PJ)w{Y$Z0=SR+wkWRlnIBLD4e!dyUS0ei*9-7-~1Wm7} z@l-`XoD{8w0aGHfa0EiXRbp?iJlE}t!p{D_wV4^5<%c$}9i7C~RK2CUCCr;m?n)cr z@ZLtE$iOFkdpfoPqlK(DV&PokV?{Qu1dzOlp1_McG-VGGqz?xoYdIy@tWA6D?XCP_ z<2v*0-5u1)3BCwTdE$Zo9Rb_h74=v%`Fg_V^gBMpW=7Ra;}jpv=h8vNR*Iwccp zrCCsqVAV`PV*9-!Rzl8^)1u;H$WjZkho@(f+IPk8+OUi*KZXKjM@Mlb)@MG9paWRp z8AlVan%$_y}hq72I9D|C&G#hZ*$W$9ftINQSUfD-A)A?ukI2tk0jAB*s?x#!TyjsI`k zBeTvfSbCu%hoH`mkdJ|;(&(Q*QR~8DkJqoCSylPtwm{{ln&U25m%0$Kn+GVl0SLm_ z(o(0$c1tXQx)@|5url-MoX$a9gq&pw)h}ET?Hka%-P$!d_lOlh+|kD^q>o;&Ajlp2Pjx& zMn3r8vjdp{-T`0=>pz|9{1e_*figaudDNPW{{S}}>H?`}BsRrVVt+pRdj_O1I;`$= z{1becm7iSwR-q1D%9gJQ2Gc5609dC0;Gy+feX-&*z}B0D;{T)uipPk_9O+|(zB^-^ z90oNXR5Pb*ZP$GT9p|KgRF3j{@W^)G5w4lR6diiZ-cP`TI0!1|XaEIsKIbI_V$Qb% ziA;lk_rE9rYCtzA;;TmRfR~K3mS?@OS1v8VB;i;sD(HJgB!<7v&o;SJG`~AE6Q*WJ zQA!+yuzJd{srD=1X(;6;ztjO@831W|&*1Uy-T0(Hc9VPavsi#~SFDp564HioGN?w) z2_{!1V~HL}wAIvo_AMDyu6iZDjLH49cDXRqb~z#BY60eYg?3Yd8R91FVk@5NE!J zt8v4%sR%u|fM&1;udBlq1E}%V%u?=n_*Op9us(yyY*>A+E|l+8dI`RcPHFBuiMO|i zmgz3r$v#V6O-8a6L>~_+YJ(sEv`_od(1KBD2@0@^xI()>|L@)u{fx1P9D{=1Czz)P%P2(vgv-sEq+KqFB%NdccLJbN zS^7GFqKJZuI#X>)aU%P{gH2T;8|F!QM3gfM#lf7ve1V1p5G^Mr1^&#;$XZ$wXtCi( zn^X1`H^#prq62Fw0cdVrT^*p0lZ9IK8rw6J%l%nZfGe3jvi_4xV^Nh>AVVn!#6;ND z@k1pvgcc~pdUy`m0Lj>MHUOYt096YkLI&m$%~cA_2w)*?ZRr8--Ur~T8Psmc`TD+N z2$yCd>^iyvc#d@%wxIEFEsSZ;ouebmyUTKbdZx``)EJ(^u5Ja3S%;VAAeM_djn$h4 zxKN4!?SnG@7~qGU{$dC+8_6;T*oOc|?t@3~B)}n!#X}gZsVYMQNCFjmH2|T}B1J3mCoeA(`h#ZLbI?vK`5D>m<9n7bB>>R)`Jr}{+2a(C>R=Qg zbC0AkJ+tg)etPl;oxY@5*+^RceSC~J>rn5Y-9tvg!Dj~mgmF?BC?jMe4#rL@;Y6^fwlM?~ouT{+WbSC*U z1UOM!=j>QKN{zDcU=_7bZ`f*(KjQy6dZqRDU>IGQ_lzTo$Qf5 znnxEQAPP7eX~&yRA;h>L%cdTFMjipxvM}9TR0eedVgWXm_Ls+l)^hXUy{=&sMY>9; z9iPY+OH!(6$O!%<=~3aR4i>^kDYox3i!Tk@brT1bc6X6Du~C!`I8C&a03s7mg%vyf z^$;#&J}k1m2vFgMuV-vjs<6fx0S0Ctl~6UX&0D`eH)m%np{bYE?C(#eE8J8_@i!UHQiEgYW~lVn8QiHVkOo0TC9owS_24T*>(dN&+svurM;3thy?+llH{>1HfaY0jPVBn@go? 
zUaVhMM9sd8K_{JXl8+{&b4U*FG!g$2I>uT5{KXo@9v=FWxpTUmYOQs9#{ar9v21Ch z$|@wY#hNCBIHNpyW+Pari?y_21-#;s6J)tSG$wni=JNs%Z31|765W>X(<8r z=yOuAJzE1I@bvWLCyoRixUF2F0=~7(;EX;;!}|#y&6)ffhKq9j{^-3~n(Rd}h9dbG z)cOUGj@J#tmN9T2Ya(Hb%!Xb^+;0IFZN=BQJ6Q_U9((mLzZ*Z}=6loaYU(3S08I68 zuV?6`tR~vKCD}$2DIFcd%F{QS18~kwI5a4jI$6nMrn$E00X~rS&k* z3S#!S!cYyIkNWrR3D?EZr1>>>@9CN7O>TSWOj+j=wabRK33j*O1fWogG0aP{RwFRE9 zMUZaT|Br7}f=Z_tD9sT9S082&eg_DC1hAos>-E3G*5pH0qoJNfEO>$N7m+WuTovu4 z#wa{VKd_^$Cpwtq1P60MO9Y^0L_Y*bCqS7R&a|f(7Wg#-_Q(~$wTHn8Mh!#A2^NKq zV?I|j@#gBQwWrGRY3JYoi}#s^OTqdQzo|7PZ}+Ud&AoCzSSrBvDL^@Y&x4Inb_=uwR6j z@0W{eg|4&tq8JPAyELBlb9kDELC!e2gvC1)hSC50n2^ExS(H8(q*Oc#mH!j!NlShFm?|@?xkl%1QNSZFx7DPWB(ibA5yT^sU$+;y6>j$I*Uqt<%D~XkSqqCy z6&5|q96~VjZ(BZLr%!V9oHxA*0D(AGlRAK9Ta|zTVWC)(Gp=xMlz0j%Z>Z}EOIc1h z%Wf#_XNR3Ab0nV@dTYC}AXkJIz70zi4a07xT6&73dt9wUGrg-t>-w${rltNuAVU+{ z$+j9=MDoX>W;dF0`QA0O&t$l^sym;{VMQegrj1{Le83$2VU6O6%4Sb3jmG%x`*h21 zm(bBMs1nfCKzEUuk?85R8|mqG5U6lp!-%zb9W4aN^<@}og#$kg)djub$4go`{_4BW zMNrp0nOiW*GamrX^VcoS=02|AmcRL)%>*qH0*g-fq$Hhbs}dJ?fXp{#VwssQJv#79> zye(VOT%j_JIh}L&0h`fb{eaq93)HGt8R40al)OEY3GsUJOm2U&4F!g`a?1RepQ+TYw0`(z_Lr(}-`r(0djE}}{92B( zC}0gkpM+A9h-PBnMlxmJJAE*I;4*!*z_$1$hrRR5duV^{7cqyf%E%rc9jfH5Yhq9n zog!bQuysg;nYA7qE=gZR>RZHirt4dEf^&H${lGk8b9S9<9KS!mspA(|b1>-{%|5~A z^f8Q%H(5;YMuc-dni-_+AcyFELYz<0Ai(Fyv^jk+O!7b6)gvS3*)$ z+F#Fo!vLID&W8GU_ENFU>vf{Hn(IRDZgokojMA||XiBU@;0-wPkz2{>!9@JghuN;# zaI%ty4o(tJ%ZwywXC=BCe@Q7krvuJ>HJ~5oBZ?bBL4a>IKIl|dr13JyO`>?>U* zhvIx@cnxT7(ujcZ{(O#5-z3|Ng~32`Q*RPnWbqdkb3dqil|LW{zQIl>1G-xcwNO8L9J^n6Av;Gzm;(GpAaMz-Eh0 z8JJ;wiU*elnxVALH#eQS$>Q1UU6L{_Bi;A?TO^qN>wV~lK5T=)LkE^`%Ne>#MDgM= z6}$``4qZi_ob!yXhJ1(pobjp$r6I*-@9WOBY%kj4Nc280TqJkK?jgUrLyCm=q?1$C z7{!xlH2#TyYguKR)_Ig2aw(r=Nuv08K9tC>=K3N|%GC8IxEP^Agt0`wN=aAhm8yoz z!APS26^UZ5tuyi;JcU$UPg)Ed4hQ~Oq7?tXhOh6pKW?mN<1qLd>Et4GQOA6`%Qcba zbnu^D`RS)Z?bAuCA3TKu3w@_Pgd<=!4f-ayflh_MC*tj$(#cSYr6%6H;eO;QvZ_P! 
z366*&Z>g}tpd5~x3z zb9mQab`OgqYx}$IH=PZ;cn2_i0sb4+G|F8n3^$i;W8(J4KC^W$X`50D#W10-nI&@; zd7lQ(oH?QTR&K{%b~s}YcwHU6@?6RI_?5V0XC?9neXsMto}{oxeaqQ zA*Hzbi%Y-tFHT12ffVsyE5QxQ$4%@GhqvYELj##v!($|db=o(?Q&GB*V7hDAQ-0&= zPb5E!#LY8KEX&Kcx|Vb)`mY}wk%=4bO;g*r6`C$g$%`_byWV--7Rutw%1SewJM&%d zEHbzIb#-V~K`XlO_!)HaG`;(b{#1~D=$16GKN*k@Ykx`(`=i}cl3&Bg(E!!uteI(= z-CQ@NTC05YBqae?e}n4o1b05Lh*YW@S)Kgu6|)I4|9DY~Ij)^6?>l5A_Z@-h8ph2j z;TbVhj!W?)nQ}LI`qtA3S`-pN6_ZKLd))&O+%pyUrcCGmSN4=Gmk(Jvgr(ps=<|Zh z>x|lV(#_$gLAP_U*wot{b_4%>jT)|0;D99j{Bo5-mleQ>ipj~1v+F`LW|1BW)&5iF zy~dlQZfbiLmZ@^8RsBQR+>qZ{mkN!wb&F>3ClQ4&Qg-ve*6=G%sAd$3xLg@An|hU1 zb?vD|?BYCQ_RTGx>9q-#0!#c?XS(qE5**~d2$;L-snW?(d#u|PC)wZn7Tu|S69x2K zy-p)o;RVX_&$%u3C#UK_63WSfq6D3I;4w~CXQQ*XL?OXzhLS=}r{okbU*fr^RP4S* z)``b0-6*va>fpP+;2!tC+eWX){Yfdl+y$zuw5t*3@@ODE$5E7UgW*lf8gt$FZ~Vz@ zh{w?Wb%9oK7uP?8n}KCxW|C>6soOo+lcGTr=^}Nq_a571(ZL;-{fa|Wjo$a1&4Njr zER^ZQAN?z_xIL!-*~{*Q(>IZ+@?q=5-lMyd$Fw?p@tEb3^T=O0V*q*|rgl14KEhz|<{4q8FLL%n{E)uH+UcXD zC!Vc_CKdE;bh2($a#v>RZ(b>~1rP0v!Ju6O-N7vb@LWlTmJ!KrS7?DTv;L!~qUpru z%d}~_t@I*sOOGwqf^iTFTIXP?N=577M)YK8M%3D0r-5|=SVW&OzL7^Upl^ImJTNZb*|`jav#<;KD-F~5*`7;0G6dx8ujVh+Li+Llw)nv z{uxYa+ypQR#(_(LYe%$OpdY{etLLFE{p=~tSxwLEIZSvK>r1M%;`-Pk`Z1?k`N)$d-49EL#d>?f08l`}`8FY_9S9v)UnEeNjd-?@_^;(W}XpqJHw+P=7q9Dy=DeI zSN8Xrg_qY0DS}>$wtGfBe0j?RdBpxhIz7s3j z%k{J88Sq~*R`o?mTu}f(Zx$1oRA!~tF%sYP-T6gsa+l3)?b{zpO;LlfrBT^!V_TQ= zoKV?H`e9lCw6&xU?{L0*V>RY*v5Bh&A;Z#&c4V+HtCfLASE8rT{u+;Q#Y}zJrO|s>DiRABM(em=3XK*biJyz zGEIj$%rur*%SV+b)suDV;D!I?fWHlcq%HyH#zQ3Kv8Vw<3=@G-oF8e+yd&zEO9ebA06fu?< zgH#OJ5}A=EyDUWvhL9~}%QmuQNIggt-#h(&|9$?rpSzrU&bjw>-|zFrj3x=d1`I%{ zX#)(vi9Z+U7&|-3v8j&7=9-nKeBV?Xeb4XV(hbec;Ht{D;~lZ-f`Zr1m1kE?-Gx}P z-G`MS_>011B^a#p$+#?L`_i-Z+6!dD*?p=l-q!exuO<$xPK1wLfs?VwTk(V_azbCv zkS(x3Z@0uYPa^V^Y@TamVK2Y^%)**&SGgfr-8GEID?1p@%jAYAD0672C^DIQ;rk|} zlJW^mBc^=djs30+pv<0^hhNSi4@&C4Sg%hSJc?y_F~_Zq*>ois&?a#e?I$>y4Zi{F-ZH2%PRGLon981{HkH;1`ra)c*pNRx3?fg8 zW+^9y#C9J?W8pK9ki1lTAP-{s@z zRF6%2Dv@M}4MzC;oiI^>!eA>7%VB8OE>{j)q zVN-K6M%uWKlKy+e*GIx#^CbkWqc>L1=iI0$!#oG^l&c6~pQ+_daszIW5PF8FDp1K0 zLB^TF zb&a#G)KvADcnpWE{?(vz1bqL%#&K2r-N{7E0P(jP^7 z^fGB39$qq4Bu?!?EZbd)7&771hHGvik=BbheEX7CO;D>2M8w2CN-$xh8Iym=loLI% z(l8IP=RNH+1i8wQO^^VNGu1L-2_w#=q+jvFs#jU?a>`t2os+d+@l3Odad-m;qkM%=9u$+a=2b!1GRv^|}|)xAU& z76c#b4i$<*V6P+ex2LW(=VhOh9lj1$Spt21^0cMo)QH~8!Y2Y1e*_)jfxC9R@^9z9 z%;-s5@~+Al`#qNd35q}gEzP`@0%c#Hth={-){YETD;oFfR!f*_QA%Y&$)~EmdDrf=Ffj^Cvx%y-DL4s07JGadA2Hdf4vwK& zIoe8N-G6KngFd?`4%gRTOtrl0 zMh|}NhmXbM%QfeGrI6K8A?TP{%Rs|RedFP>YP;;ZvG{@Do5XhnnxEUIFuc0D*|o>X z?UrRa<~t>v`6x`a0&6{)FX4I=41!Fi_itBDEe>+>RPj7a-K}Rl;7FLp9eu{}o7`>2 zq^{h($Ez3PSR+qq=_~rXA3Cm+2^lEOfxUOSBI{oxJx(fsqs6&xVd!1Jt-~PPjY?p^ z6-@D%WuSA_O{vIgs%_;82pB}rl9%jle&B5@n2z8Xh)6QXXm5-M3@B;-cSECUTn469 z$u3>&`Lsbqu1bT%a2Y>2H*W^Rbp#)VPj)haNXdzSs{LfY(EQS2r2eYG^N#X|P~e&a zyvYoKqlRLWs>40{(Sk+muTK)Ig)HHUe7@5Jw)>Erp2HBOa~9b?u|m*j?nONrs3rN%SPtle^UDqO6PMm;|7oN#VBzUOI2>8F|-S~bQDMU_Nbep)lrJ%1@R zhd?PinB#NAcU&rW!0-nr;C+ecxizr|OT%-M7&?)OaRGbg5;@qwD`aWmlOW|Mw2hw+ zkU~0YQWsA=DGyNHjK^mg#(v8)?cs<5rUV-?aR?Gg7|$z)6NX~=936%15olt&0%eRc>D%y~XA|k{Wl9t8J+CRvIycmuHF~UI zMTE)F_~Jda-D{r$(BV4Ep91>wOg%~q9c}=pNs@A~G?1A6BL@(t-@2l4`X?p|kF59o z_+dRzP!W!q7d7r{wQ0Riw~h{HN-7?JS$Hot2Dw0ism-);d9#~cOkT(>U0W}ne?o8+xls7v9@ke^Sl~^f&FSr&8+TddwpMG7GK+Z52(n(+H|y9y{{K5 zQheXdW$x_9+CrdMqa6dKKIv(9ZIS-o=02P|id3~2>S+n^VV88Dzuz9MYhr;obLQ4t zfaxnM5r93!nbyOxc(a_*u`%tHD?$VbA=QvDhS4&(nN6-BI5<<>Blmt5GwnavK8snz zkSh!Q4x;Lq;#XW3uNbm{QZbGdd5^(@W-q06A)!o?O4SH{)DP5%0+O3#ig9$CU$Oh0 z!Jm^iIq<{T*#r+L?nO%3XyCU^a;Rj?#{0B!tTF81Ze8H?iJsN@wuZDN^~r{TKAaT2 
zM;rXLp9fOg1jy55T593|l>KpiY_0^~UN2t6%eh(o@S5ANj4o&WTczLzq;yB#PS*Lo zsN!Z<7Q-mt*x2;WF7l3d1O!~^(p%HmIu!&(s6M}b@B^MsSWg+^0QFKKZLGcLz^>IRo-d>%YpmFEhq# zVF{Ph?~A)F9jFJ`@f1V$89}{wW-pVfYDCLs&?-fJ(A?3UPJ?5Ob((N^xwBJoP<;tN zpi}QRs@hDW4h#NRj{T7R%)q%>&o*Wnuga(OLP0Jl6nMeIp^^o4Dt7myyV}T!^74}F zD$8V;w0yiye#F|wz;K<`fVGO1n6HPUhh9w>A{nrmbVC-CY1~??p)Db^etJHE>eK3x zni&#_VmbYMY)p5K59N_=3nl=V2F#P%Mqg;~vNaTroD|DZsAk}ssZt0>q$FH&#w#bc zXPiT*T}t-DTs9xoAJE74x!bguxY$4HfAjB2X1){#g5QY;D4vvl&1^s_(+w zX3WnFC~&f{9IR2PE2Bk63>WA2Zvoc1uitlMspqc-&XWqng>-!GxFDK*6o(ss;4b(j zOan;o*i~w$q+U1>tlW5PD(U0&impMp{UBGn#9a^|vm=06W~O@#2OIyB2o zbU-D_oXJp-lM4>Yk7<%J(L2wJn3=xshcrJO=b~WEM5#!YidTF9J`@%XMdSd<7&O4c z3&J(wMn)$awuH(?veK`$|J)8q1N^f=-9rS8)RJOcV16E9*-4S3^}saL=cF`hk^}0E zl0YK!0jM6(GFWJN-G3xQ6_$ny@dXfUWq zi6fC>O0$+y z;hyCFVUAB!cmVP|t0MxD_1GaBE)7AF;>j?_FbJq?HFI`mP3cm&cTWemP1WHDRw-`c z-#6B{boC8e&E$zB_u*NcB6w;x4Fhhg$|0@M0ZAo?Kuol*eSwTWfFjOktlm`Q%mj-t zGCBt+Jt`Mp4}YAt1VjX!>F`9O&!v7*t9Aq(9$L)&FIr#2L`C>7!J;}y)D+5o|MQC% zPQ{+-DtT9~h4JJQ;(`vb-XA1?MLz|c^(FP?210j9iid~*n(;G-i^IF`R11)eiEHzx z&-;-Pm(vL{p(4qFYH#Nm?3uCS$BVzJ_r99#f4lzBZL^t6j9*E$`PF6nTQJ^<)P3Z(Nq^*J znhbta@%&F?0ry=^{fP=S3N3Hd`&Vr(@ExPe#gR*_&KD_q=-~gCU<-_4`aMbtL`mPj zS*f3z4G?Chqci&-NjIZk;F>Gxj#f?_(?r<1DO&9}MEgX|30VuO?Qnf}zE~m{hc33; zz>?8Kd1|_>jkF<9VS@q|DX!*MXKspAij7t$=S?v7(CcZZ-2=9p>WpX)TWIdxa+7)4 zAIpr=+U|f*&49t~v~d51x)#{3ghZ;?ZYrE*wCs?zsPn<@0qpK&zToz4l->9@f=9B= zY}=|{92^x#$S0uY0aHkG^1gk)zxcMiP4m*auX;9#Lj}PZ&1U6THyR=W09TEsc1Bc6 zKL2f^;4gtEwH{r+^R0>#Tfj4R7sM145saS%;>@x+2; QOVgXZ0=G1-G<1vqA1xx5L;wH) literal 0 HcmV?d00001 diff --git a/static/variation_walkthru/000001.3357757885.png b/docs/assets/variation_walkthru/000001.3357757885.png similarity index 100% rename from static/variation_walkthru/000001.3357757885.png rename to docs/assets/variation_walkthru/000001.3357757885.png diff --git a/static/variation_walkthru/000002.1614299449.png b/docs/assets/variation_walkthru/000002.1614299449.png similarity index 100% rename from static/variation_walkthru/000002.1614299449.png rename to docs/assets/variation_walkthru/000002.1614299449.png diff --git a/static/variation_walkthru/000002.3647897225.png b/docs/assets/variation_walkthru/000002.3647897225.png similarity index 100% rename from static/variation_walkthru/000002.3647897225.png rename to docs/assets/variation_walkthru/000002.3647897225.png diff --git a/static/variation_walkthru/000003.1614299449.png b/docs/assets/variation_walkthru/000003.1614299449.png similarity index 100% rename from static/variation_walkthru/000003.1614299449.png rename to docs/assets/variation_walkthru/000003.1614299449.png diff --git a/static/variation_walkthru/000004.3747154981.png b/docs/assets/variation_walkthru/000004.3747154981.png similarity index 100% rename from static/variation_walkthru/000004.3747154981.png rename to docs/assets/variation_walkthru/000004.3747154981.png diff --git a/docs/features/CLI.md b/docs/features/CLI.md new file mode 100644 index 0000000000..d809d48841 --- /dev/null +++ b/docs/features/CLI.md @@ -0,0 +1,228 @@ +# **Interactive Command-Line Interface** + +The `dream.py` script, located in `scripts/dream.py`, provides an interactive interface to image generation similar to the "dream mothership" bot that Stable AI provided on its Discord server. + +Unlike the txt2img.py and img2img.py scripts provided in the original CompViz/stable-diffusion source code repository, the time-consuming initialization of the AI model initialization only happens once. After that image generation +from the command-line interface is very fast. 
+ +The script uses the readline library to allow for in-line editing, command history (up and down arrows), autocompletion, and more. To help keep track of which prompts generated which images, the script writes a log file of image names and prompts to the selected output directory. + +In addition, as of version 1.02, it also writes the prompt into the PNG file's metadata where it can be retrieved using scripts/images2prompt.py + +The script is confirmed to work on Linux, Windows and Mac systems. + +_Note:_ This script runs from the command-line or can be used as a Web application. The Web GUI is currently rudimentary, but a much better replacement is on its way. + +``` +(ldm) ~/stable-diffusion$ python3 ./scripts/dream.py +* Initializing, be patient... +Loading model from models/ldm/text2img-large/model.ckpt +(...more initialization messages...) + +* Initialization done! Awaiting your command... +dream> ashley judd riding a camel -n2 -s150 +Outputs: + outputs/img-samples/00009.png: "ashley judd riding a camel" -n2 -s150 -S 416354203 + outputs/img-samples/00010.png: "ashley judd riding a camel" -n2 -s150 -S 1362479620 + +dream> "there's a fly in my soup" -n6 -g + outputs/img-samples/00011.png: "there's a fly in my soup" -n6 -g -S 2685670268 + seeds for individual rows: [2685670268, 1216708065, 2335773498, 822223658, 714542046, 3395302430] +dream> q + +# this shows how to retrieve the prompt stored in the saved image's metadata +(ldm) ~/stable-diffusion$ python ./scripts/images2prompt.py outputs/img_samples/*.png +00009.png: "ashley judd riding a camel" -s150 -S 416354203 +00010.png: "ashley judd riding a camel" -s150 -S 1362479620 +00011.png: "there's a fly in my soup" -n6 -g -S 2685670268 +``` + +
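+
+If you prefer not to shell out to images2prompt.py, the stored metadata can also be
+inspected directly. The snippet below is only a sketch and assumes the Pillow
+library is available; it prints every PNG text chunk rather than guessing which
+key dream.py uses:
+
+```python
+# sketch: requires Pillow (pip install pillow)
+from PIL import Image
+
+img = Image.open("outputs/img-samples/00009.png")
+# PNG text chunks (including the stored prompt) show up in the info dictionary
+for key, value in img.info.items():
+    print(f"{key}: {value}")
+```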
+
+The `dream>` prompt's arguments are pretty much identical to those
+used in the Discord bot, except you don't need to type "!dream" (it
+doesn't hurt if you do). A significant change is that creation of
+individual images is now the default unless --grid (-g) is given. A
+full list is given in [List of prompt arguments](#list-of-prompt-arguments).
+
+# Arguments
+
+The script itself also recognizes a series of command-line switches
+that will change important global defaults, such as the directory for
+image outputs and the location of the model weight files.
+
+## List of arguments recognized at the command line:
+
+These command-line arguments can be passed to dream.py when you first
+run it from the Windows, Mac or Linux command line. Some set defaults
+that can be overridden on a per-prompt basis (see [List of prompt
+arguments](#list-of-prompt-arguments)). Others apply to the session as
+a whole and cannot be changed at the `dream>` prompt.
+
+| Argument | Shortcut | Default | Description |
+|--------------------|------------|---------------------|--------------|
+| --help | -h | | Print a concise help message. |
+| --outdir | -o | outputs/img_samples | Location for generated images. |
+| --prompt_as_dir | -p | False | Name output directories using the prompt text. |
+| --from_file | | None | Read list of prompts from a file. Use "-" to read from standard input |
+| --model | | stable-diffusion-1.4| Loads model specified in configs/models.yaml. Currently one of "stable-diffusion-1.4" or "laion400m"|
+| --full_precision | -F | False | Run in slower full-precision mode. Needed for Macintosh M1/M2 hardware and some older video cards. |
+| --web | | False | Start in web server mode |
+| --host | | localhost | Which network interface web server should listen on. Set to 0.0.0.0 to listen on any. |
+| --port | | 9090 | Which port web server should listen for requests on. |
+| --config | | configs/models.yaml | Configuration file for models and their weights. |
+| --iterations | -n | 1 | How many images to generate per prompt. |
+| --grid | -g | False | Save all image series as a grid rather than individually. |
+| --sampler | -A | k_lms | Sampler to use. Use -h to get list of available samplers. |
+| --seamless | | False | Create interesting effects by tiling elements of the image. |
+| --embedding_path | | None | Path to pre-trained embedding manager checkpoints, for custom models |
+| --gfpgan_dir | | src/gfpgan | Path to where GFPGAN is installed. |
+| --gfpgan_model_path| | experiments/pretrained_models/GFPGANv1.3.pth| Path to GFPGAN model file, relative to --gfpgan_dir. |
+| --device | -d | torch.cuda.current_device() | Device to run SD on, e.g. "cuda:0" |
+
+These arguments are deprecated but still work:
+
+| Argument | Shortcut | Default | Description |
+|--------------------|------------|---------------------|--------------|
+| --weights | | None | Path to weights file; use `--model stable-diffusion-1.4` instead |
+| --laion400m | -l | False | Use older LAION400m weights; use `--model=laion400m` instead |
+
+**A note on path names:** On Windows systems, you may run into
+ problems when passing the dream script standard backslashed path
+ names because the Python interpreter treats "\" as an escape.
+ You can either double your slashes (ick): C:\\\\path\\\\to\\\\my\\\\file, or
+ use Linux/Mac style forward slashes (better): C:/path/to/my/file.
+
+## List of prompt arguments
+
+After the dream.py script initializes, it will present you with a **dream>** prompt.
Here you can enter information to generate images
+from text (txt2img), to embellish an existing image or sketch
+(img2img), or to selectively alter chosen regions of the image
+(inpainting).
+
+### This is an example of txt2img:
+
+~~~~
+dream> waterfall and rainbow -W640 -H480
+~~~~
+
+This will create the requested image with the dimensions 640 (width)
+and 480 (height).
+
+Here are the dream> commands that apply to txt2img:
+
+| Argument | Shortcut | Default | Description |
+|--------------------|------------|---------------------|--------------|
+| "my prompt" | | | Text prompt to use. The quotation marks are optional. |
+| --width | -W | 512 | Width of generated image |
+| --height | -H | 512 | Height of generated image |
+| --iterations | -n | 1 | How many images to generate from this prompt |
+| --steps | -s | 50 | How many steps of refinement to apply |
+| --cfg_scale | -C | 7.5 | How hard to try to match the prompt to the generated image; any number greater than 0.0 works, but the useful range is roughly 5.0 to 20.0 |
+| --seed | -S | None | Set the random seed for the next series of images. This can be used to recreate an image generated previously. |
+| --sampler | -A | k_lms | Sampler to use. Use -h to get a list of available samplers. |
+| --grid | -g | False | Turn on grid mode to return a single image combining all the images generated by this prompt |
+| --individual | -i | True | Turn off grid mode (deprecated; leave off --grid instead) |
+| --outdir | -o | outputs/img_samples | Temporarily change the location of these images |
+| --seamless | | False | Activate seamless tiling for interesting effects |
+| --log_tokenization | -t | False | Display a color-coded list of the parsed tokens derived from the prompt |
+| --skip_normalization | -x | False | Weighted subprompts will not be normalized. See [Weighted Prompts](./OTHER.md#weighted-prompts) |
+| --upscale | -U | -U 1 0.75 | Upscale image by magnification factor (2, 4), and set strength of upscaling (0.0-1.0). If strength is not set, it defaults to 0.75. |
+| --gfpgan_strength | -G | -G0 | Fix faces using the GFPGAN algorithm; argument indicates how hard the algorithm should try (0.0-1.0) |
+| --save_original | -save_orig | False | When upscaling or fixing faces, this will cause the original image to be saved rather than replaced. |
+| --variation | -v | 0.0 | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with -S and -n to generate a series of riffs on a starting image. See [Variations](./VARIATIONS.md). |
+| --with_variations | -V | None | Combine two or more variations. See [Variations](./VARIATIONS.md) for how to use this. |
+
+Note that the width and height of the image must be multiples of
+64. You can provide different values, but they will be rounded down to
+the nearest multiple of 64.
+
+
+### This is an example of img2img:
+
+~~~~
+dream> waterfall and rainbow -I./vacation-photo.png -W640 -H480 --fit
+~~~~
+
+This will modify the indicated vacation photograph by making it more
+like the prompt. Results will vary greatly depending on what is in the
+image. We also ask it to --fit the image into a box no bigger than
+640x480. Otherwise the image size will be identical to the provided
+photo and you may run out of memory if it is large.
+ +In addition to the command-line options recognized by txt2img, img2img +accepts additional options: + +| Argument | Shortcut | Default | Description | +|--------------------|------------|---------------------|--------------| +| --init_img | -I | None | Path to the initialization image | +| --fit | -F | False | Scale the image to fit into the specified -H and -W dimensions | +| --strength | -s | 0.75 | How hard to try to match the prompt to the initial image. Ranges from 0.0-0.99, with higher values replacing the initial image completely.| + +### This is an example of inpainting: + +~~~~ +dream> waterfall and rainbow -I./vacation-photo.png -M./vacation-mask.png -W640 -H480 --fit +~~~~ + +This will do the same thing as img2img, but image alterations will +only occur within transparent areas defined by the mask file specified +by -M. You may also supply just a single initial image with the areas +to overpaint made transparent, but you must be careful not to destroy +the pixels underneath when you create the transparent areas. See +[Inpainting](./INPAINTING.md) for details. + +inpainting accepts all the arguments used for txt2img and img2img, as +well as the --mask (-M) argument: + +| Argument | Shortcut | Default | Description | +|--------------------|------------|---------------------|--------------| +| --init_mask | -M | None |Path to an image the same size as the initial_image, with areas for inpainting made transparent.| + + +# Command-line editing and completion + +If you are on a Macintosh or Linux machine, the command-line offers +convenient history tracking, editing, and command completion. + +- To scroll through previous commands and potentially edit/reuse them, use the up and down cursor keys. +- To edit the current command, use the left and right cursor keys to position the cursor, and then backspace, delete or insert characters. +- To move to the very beginning of the command, type CTRL-A (or command-A on the Mac) +- To move to the end of the command, type CTRL-E. +- To cut a section of the command, position the cursor where you want to start cutting and type CTRL-K. +- To paste a cut section back in, position the cursor where you want to paste, and type CTRL-Y + +Windows users can get similar, but more limited, functionality if they +launch dream.py with the "winpty" program: + +~~~ +> winpty python scripts\dream.py +~~~ + +On the Mac and Linux platforms, when you exit dream.py, the last 1000 +lines of your command-line history will be saved. When you restart +dream.py, you can access the saved history using the up-arrow key. + +In addition, limited command-line completion is installed. In various +contexts, you can start typing your command and press tab. A list of +potential completions will be presented to you. You can then type a +little more, hit tab again, and eventually autocomplete what you want. + +When specifying file paths using the one-letter shortcuts, the CLI +will attempt to complete pathnames for you. This is most handy for the +-I (init image) and -M (init mask) paths. To initiate completion, start +the path with a slash ("/") or "./". For example: + +~~~ +dream> zebra with a mustache -I./test-pictures +-I./test-pictures/Lincoln-and-Parrot.png -I./test-pictures/zebra.jpg -I./test-pictures/madonna.png +-I./test-pictures/bad-sketch.png -I./test-pictures/man_with_eagle/ +~~~ + +You can then type "z", hit tab again, and it will autofill to "zebra.jpg". + +More text completion features (such as autocompleting seeds) are on their way. 
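+
+For the curious, the completion behaviour described above is built on
+Python's standard readline module. The following is a minimal,
+self-contained illustration of that mechanism only -- it is not the
+script's actual completer, and the option list is invented for the
+example:
+
+```python
+import readline
+
+# Toy option list -- dream.py's real completer knows its own switches and paths.
+OPTIONS = ["--steps", "--seed", "--grid", "--upscale", "-I", "-M"]
+
+def complete(text, state):
+    # readline calls this with state=0,1,2,... and expects the state-th
+    # candidate extending `text`, or None when the candidates are exhausted.
+    matches = [opt for opt in OPTIONS if opt.startswith(text)]
+    return matches[state] if state < len(matches) else None
+
+readline.set_completer_delims(" \t\n")   # keep "-" inside completed words
+readline.set_completer(complete)
+readline.parse_and_bind("tab: complete")  # trigger completion on TAB
+
+while True:
+    line = input("dream> ")
+    if line.strip() in ("q", "quit"):
+        break
+    print(f"(pretend we generated) {line}")
+```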
+ diff --git a/docs/features/IMG2IMG.md b/docs/features/IMG2IMG.md new file mode 100644 index 0000000000..ac560f6984 --- /dev/null +++ b/docs/features/IMG2IMG.md @@ -0,0 +1,30 @@ +# **Image-to-Image** + +This script also provides an img2img feature that lets you seed your +creations with an initial drawing or photo. This is a really cool +feature that tells stable diffusion to build the prompt on top of the +image you provide, preserving the original's basic shape and +layout. To use it, provide the `--init_img` option as shown here: + +``` +dream> "waterfall and rainbow" --init_img=./init-images/crude_drawing.png --strength=0.5 -s100 -n4 +``` + +The `--init_img (-I)` option gives the path to the seed +picture. `--strength (-f)` controls how much the original will be +modified, ranging from `0.0` (keep the original intact), to `1.0` +(ignore the original completely). The default is `0.75`, and ranges +from `0.25-0.75` give interesting results. + +You may also pass a `-v` option to generate count variants on +the original image. This is done by passing the first generated image +back into img2img the requested number of times. It generates +interesting variants. + +If the initial image contains transparent regions, then Stable +Diffusion will only draw within the transparent regions, a process +called "inpainting". However, for this to work correctly, the color +information underneath the transparent needs to be preserved, not +erased. See [Creating Transparent Images For +Inpainting](./INPAINTING.md#creating-transparent-regions-for-inpainting) +for details. diff --git a/docs/features/INPAINTING.md b/docs/features/INPAINTING.md new file mode 100644 index 0000000000..bf28de31a6 --- /dev/null +++ b/docs/features/INPAINTING.md @@ -0,0 +1,41 @@ +# **Creating Transparent Regions for Inpainting** + +Inpainting is really cool. To do it, you start with an initial image +and use a photoeditor to make one or more regions transparent +(i.e. they have a "hole" in them). You then provide the path to this +image at the dream> command line using the `-I` switch. Stable +Diffusion will only paint within the transparent region. + +There's a catch. In the current implementation, you have to prepare +the initial image correctly so that the underlying colors are +preserved under the transparent area. Many imaging editing +applications will by default erase the color information under the +transparent pixels and replace them with white or black, which will +lead to suboptimal inpainting. You also must take care to export the +PNG file in such a way that the color information is preserved. + +If your photoeditor is erasing the underlying color information, +`dream.py` will give you a big fat warning. If you can't find a way to +coax your photoeditor to retain color values under transparent areas, +then you can combine the `-I` and `-M` switches to provide both the +original unedited image and the masked (partially transparent) image: + +``` +dream> man with cat on shoulder -I./images/man.png -M./images/man-transparent.png +``` + +We are hoping to get rid of the need for this workaround in an upcoming release. + +## Recipe for GIMP + +[GIMP](https://www.gimp.org/) is a popular Linux photoediting tool. + +1. Open image in GIMP. +2. Layer->Transparency->Add Alpha Channel +3. Use lasoo tool to select region to mask +4. Choose Select -> Float to create a floating selection +5. Open the Layers toolbar (^L) and select "Floating Selection" +6. Set opacity to 0% +7. Export as PNG +8. 
In the export dialogue, make sure the "Save colour values from
+ transparent pixels" checkbox is selected.
diff --git a/docs/features/OTHER.md b/docs/features/OTHER.md
new file mode 100644
index 0000000000..3853b185ed
--- /dev/null
+++ b/docs/features/OTHER.md
@@ -0,0 +1,133 @@
+## **Google Colab**
+
+Stable Diffusion AI Notebook:
Open and follow instructions to use an +isolated environment running Dream.
+ +Output Example: +![Colab Notebook](../assets/colab_notebook.png) + +--- + +## **Seamless Tiling** + +The seamless tiling mode causes generated images to seamlessly tile +with itself. To use it, add the `--seamless` option when starting the +script which will result in all generated images to tile, or for each +`dream>` prompt as shown here: + +``` +dream> "pond garden with lotus by claude monet" --seamless -s100 -n4 +``` + +--- + +## **Reading Prompts from a File** + +You can automate `dream.py` by providing a text file with the prompts +you want to run, one line per prompt. The text file must be composed +with a text editor (e.g. Notepad) and not a word processor. Each line +should look like what you would type at the dream> prompt: + +``` +a beautiful sunny day in the park, children playing -n4 -C10 +stormy weather on a mountain top, goats grazing -s100 +innovative packaging for a squid's dinner -S137038382 +``` + +Then pass this file's name to `dream.py` when you invoke it: + +``` +(ldm) ~/stable-diffusion$ python3 scripts/dream.py --from_file "path/to/prompts.txt" +``` + +You may read a series of prompts from standard input by providing a filename of `-`: + +``` +(ldm) ~/stable-diffusion$ echo "a beautiful day" | python3 scripts/dream.py --from_file - +``` + +--- + +## **Shortcuts: Reusing Seeds** + +Since it is so common to reuse seeds while refining a prompt, there is now a shortcut as of version 1.11. Provide a `**-S**` (or `**--seed**`) +switch of `-1` to use the seed of the most recent image generated. If you produced multiple images with the `**-n**` switch, then you can go back further using -2, -3, etc. up to the first image generated by the previous command. Sorry, but you can't go back further than one command. + +Here's an example of using this to do a quick refinement. It also illustrates using the new `**-G**` switch to turn on upscaling and face enhancement (see previous section): + +``` +dream> a cute child playing hopscotch -G0.5 +[...] +outputs/img-samples/000039.3498014304.png: "a cute child playing hopscotch" -s50 -W512 -H512 -C7.5 -mk_lms -S3498014304 + +# I wonder what it will look like if I bump up the steps and set facial enhancement to full strength? +dream> a cute child playing hopscotch -G1.0 -s100 -S -1 +reusing previous seed 3498014304 +[...] +outputs/img-samples/000040.3498014304.png: "a cute child playing hopscotch" -G1.0 -s100 -W512 -H512 -C7.5 -mk_lms -S3498014304 +``` + +--- + +## **Weighted Prompts** + +You may weight different sections of the prompt to tell the sampler to attach different levels of +priority to them, by adding `:(number)` to the end of the section you wish to up- or downweight. +For example consider this prompt: + +``` + tabby cat:0.25 white duck:0.75 hybrid +``` + +This will tell the sampler to invest 25% of its effort on the tabby +cat aspect of the image and 75% on the white duck aspect +(surprisingly, this example actually works). The prompt weights can +use any combination of integers and floating point numbers, and they +do not need to add up to 1. + +--- + +## **Simplified API** + +For programmers who wish to incorporate stable-diffusion into other products, this repository includes a simplified API for text to image generation, which lets you create images from a prompt in just three lines of code: + +``` +from ldm.generate import Generate +g = Generate() +outputs = g.txt2img("a unicorn in manhattan") +``` + +Outputs is a list of lists in the format [filename1,seed1],[filename2,seed2]...]. 
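+
+As a sketch of how a caller might consume that return value (based only
+on the format described above):
+
+```python
+from ldm.generate import Generate
+
+g = Generate()
+outputs = g.txt2img("a unicorn in manhattan")
+
+# Each entry is a [filename, seed] pair; keeping the seed around lets you
+# reproduce or refine a favourite image later with -S <seed> at the dream> prompt.
+for filename, seed in outputs:
+    print(f"{filename} (seed {seed})")
+```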
+ +Please see ldm/generate.py for more information. A set of example scripts is coming RSN. + +--- + +## **Preload Models** + +In situations where you have limited internet connectivity or are +blocked behind a firewall, you can use the preload script to preload +the required files for Stable Diffusion to run. + +The preload script `scripts/preload_models.py` needs to be run once at +least while connected to the internet. In the following runs, it will +load up the cached versions of the required files from the `.cache` +directory of the system. + +``` +(ldm) ~/stable-diffusion$ python3 ./scripts/preload_models.py +preloading bert tokenizer... +Downloading: 100%|██████████████████████████████████| 28.0/28.0 [00:00<00:00, 49.3kB/s] +Downloading: 100%|██████████████████████████████████| 226k/226k [00:00<00:00, 2.79MB/s] +Downloading: 100%|██████████████████████████████████| 455k/455k [00:00<00:00, 4.36MB/s] +Downloading: 100%|██████████████████████████████████| 570/570 [00:00<00:00, 477kB/s] +...success +preloading kornia requirements... +Downloading: "https://github.com/DagnyT/hardnet/raw/master/pretrained/train_liberty_with_aug/checkpoint_liberty_with_aug.pth" to /u/lstein/.cache/torch/hub/checkpoints/checkpoint_liberty_with_aug.pth +100%|███████████████████████████████████████████████| 5.10M/5.10M [00:00<00:00, 101MB/s] +...success +``` diff --git a/docs/features/TEXTUAL_INVERSION.md b/docs/features/TEXTUAL_INVERSION.md new file mode 100644 index 0000000000..3641732e79 --- /dev/null +++ b/docs/features/TEXTUAL_INVERSION.md @@ -0,0 +1,70 @@ +# **Personalizing Text-to-Image Generation** + +You may personalize the generated images to provide your own styles or objects by training a new LDM checkpoint and introducing a new vocabulary to the fixed model as a (.pt) embeddings file. Alternatively, you may use or train HuggingFace Concepts embeddings files (.bin) from https://huggingface.co/sd-concepts-library and its associated notebooks. + +**Training** + +To train, prepare a folder that contains images sized at 512x512 and execute the following: + +**WINDOWS**: As the default backend is not available on Windows, if you're using that platform, set the environment variable `PL_TORCH_DISTRIBUTED_BACKEND=gloo` + +``` +(ldm) ~/stable-diffusion$ python3 ./main.py --base ./configs/stable-diffusion/v1-finetune.yaml \ + -t \ + --actual_resume ./models/ldm/stable-diffusion-v1/model.ckpt \ + -n my_cat \ + --gpus 0, \ + --data_root D:/textual-inversion/my_cat \ + --init_word 'cat' +``` + +During the training process, files will be created in +/logs/[project][time][project]/ where you can see the process. + +Conditioning contains the training prompts inputs, reconstruction the +input images for the training epoch samples, samples scaled for a +sample of the prompt and one with the init word provided. + +On a RTX3090, the process for SD will take ~1h @1.6 iterations/sec. + +_Note_: According to the associated paper, the optimal number of +images is 3-5. Your model may not converge if you use more images than +that. + +Training will run indefinitely, but you may wish to stop it (with +ctrl-c) before the heat death of the universe, when you find a low +loss epoch or around ~5000 iterations. 
Note that you can set a fixed +limit on the number of training steps by decreasing the "max_steps" +option in configs/stable_diffusion/v1-finetune.yaml (currently set to +4000000) + +**Running** + +Once the model is trained, specify the trained .pt or .bin file when +starting dream using + +``` +(ldm) ~/stable-diffusion$ python3 ./scripts/dream.py --embedding_path /path/to/embedding.pt --full_precision +``` + +Then, to utilize your subject at the dream prompt + +``` +dream> "a photo of *" +``` + +This also works with image2image + +``` +dream> "waterfall and rainbow in the style of *" --init_img=./init-images/crude_drawing.png --strength=0.5 -s100 -n4 +``` + +For .pt files it's also possible to train multiple tokens (modify the placeholder string in `configs/stable-diffusion/v1-finetune.yaml`) and combine LDM checkpoints using: + +``` +(ldm) ~/stable-diffusion$ python3 ./scripts/merge_embeddings.py \ + --manager_ckpts /path/to/first/embedding.pt /path/to/second/embedding.pt [...] \ + --output_path /path/to/output/embedding.pt +``` + +Credit goes to rinongal and the repository located at https://github.com/rinongal/textual_inversion Please see the repository and associated paper for details and limitations. diff --git a/docs/features/UPSCALE.md b/docs/features/UPSCALE.md new file mode 100644 index 0000000000..51f49a70d8 --- /dev/null +++ b/docs/features/UPSCALE.md @@ -0,0 +1,105 @@ +# **GFPGAN and Real-ESRGAN Support** + +The script also provides the ability to do face restoration and +upscaling with the help of GFPGAN and Real-ESRGAN respectively. + +As of version 1.14, environment.yaml will install the Real-ESRGAN package into the +standard install location for python packages, and will put GFPGAN into a subdirectory of "src" +in the stable-diffusion directory. +(The reason for this is that the standard GFPGAN distribution has a minor bug that adversely affects image +color.) Upscaling with Real-ESRGAN should "just work" without further intervention. Simply pass the --upscale (-U) +option on the dream> command line, or indicate the desired scale on the popup in the Web GUI. + +For **GFPGAN** to work, there is one additional step needed. You will need to download and +copy the GFPGAN [models file](https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth) +into **src/gfpgan/experiments/pretrained_models**. On Mac and Linux systems, here's how you'd do it using +**wget**: +~~~~ +> wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth src/gfpgan/experiments/pretrained_models/ +~~~~ + +Make sure that you're in the stable-diffusion directory when you do this. + +Alternatively, if you have GFPGAN installed elsewhere, or if you are using +an earlier version of this package which asked you to install GFPGAN in a +sibling directory, you may use the `--gfpgan_dir` argument with `dream.py` to set a +custom path to your GFPGAN directory. _There are other GFPGAN related +boot arguments if you wish to customize further._ + +**Note: Internet connection needed:** +Users whose GPU machines are isolated from the Internet (e.g. on a +University cluster) should be aware that the first time you run +dream.py with GFPGAN and Real-ESRGAN turned on, it will try to +download model files from the Internet. To rectify this, you may run +`python3 scripts/preload_models.py` after you have installed GFPGAN +and all its dependencies. + +**Usage** + +You will now have access to two new prompt arguments. 
+ +**Upscaling** + +`-U : ` + +The upscaling prompt argument takes two values. The first value is a +scaling factor and should be set to either `2` or `4` only. This will +either scale the image 2x or 4x respectively using different models. + +You can set the scaling stength between `0` and `1.0` to control +intensity of the of the scaling. This is handy because AI upscalers +generally tend to smooth out texture details. If you wish to retain +some of those for natural looking results, we recommend using values +between `0.5 to 0.8`. + +If you do not explicitly specify an upscaling_strength, it will +default to 0.75. + +**Face Restoration** + +`-G : ` + +This prompt argument controls the strength of the face restoration +that is being applied. Similar to upscaling, values between `0.5 to 0.8` are recommended. + +You can use either one or both without any conflicts. In cases where +you use both, the image will be first upscaled and then the face +restoration process will be executed to ensure you get the highest +quality facial features. + +`--save_orig` + +When you use either `-U` or `-G`, the final result you get is upscaled +or face modified. If you want to save the original Stable Diffusion +generation, you can use the `-save_orig` prompt argument to save the +original unaffected version too. + +**Example Usage** + +``` +dream > superman dancing with a panda bear -U 2 0.6 -G 0.4 +``` + +This also works with img2img: + +``` +dream> a man wearing a pineapple hat -I path/to/your/file.png -U 2 0.5 -G 0.6 +``` + +**Note** + +GFPGAN and Real-ESRGAN are both memory intensive. In order to avoid +crashes and memory overloads during the Stable Diffusion process, +these effects are applied after Stable Diffusion has completed its +work. + +In single image generations, you will see the output right away but +when you are using multiple iterations, the images will first be +generated and then upscaled and face restored after that process is +complete. While the image generation is taking place, you will still +be able to preview the base images. + +If you wish to stop during the image generation but want to upscale or +face restore a particular generated image, pass it again with the same +prompt and generated seed along with the `-U` and `-G` prompt +arguments to perform those actions. diff --git a/VARIATIONS.md b/docs/features/VARIATIONS.md similarity index 58% rename from VARIATIONS.md rename to docs/features/VARIATIONS.md index cb42ddfd0e..c1798ae759 100644 --- a/VARIATIONS.md +++ b/docs/features/VARIATIONS.md @@ -1,28 +1,28 @@ -# Cheat Sheat for Generating Variations +# **Variations** -Release 1.13 of SD-Dream adds support for image variations. There are two things that you can do: +Release 1.13 of SD-Dream adds support for image variations. -1. Generate a series of systematic variations of an image, given a -prompt. The amount of variation from one image to the next can be -controlled. +You are able to do the following: -2. Given two or more variations that you like, you can combine them in -a weighted fashion +1. Generate a series of systematic variations of an image, given a prompt. The amount of variation from one image to the next can be controlled. -This cheat sheet provides a quick guide for how this works in -practice, using variations to create the desired image of Xena, -Warrior Princess. +2. Given two or more variations that you like, you can combine them in a weighted fashion. 
-## Step 1 -- find a base image that you like +--- -The prompt we will use throughout is "lucy lawless as xena, warrior -princess, character portrait, high resolution." This will be indicated -as "prompt" in the examples below. +This cheat sheet provides a quick guide for how this works in practice, using variations to create the desired image of Xena, Warrior Princess. -First we let SD create a series of images in the usual way, in this case -requesting six iterations: +--- -~~~ +## Step 1 -- Find a base image that you like + +The prompt we will use throughout is `lucy lawless as xena, warrior princess, character portrait, high resolution.` + +This will be indicated as `prompt` in the examples below. + +First we let SD create a series of images in the usual way, in this case requesting six iterations: + +``` dream> lucy lawless as xena, warrior princess, character portrait, high resolution -n6 ... Outputs: @@ -32,19 +32,21 @@ Outputs: ./outputs/Xena/000001.2224800325.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -S2224800325 ./outputs/Xena/000001.465250761.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -S465250761 ./outputs/Xena/000001.3357757885.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -S3357757885 -~~~ +``` The one with seed 3357757885 looks nice: - + -Let's try to generate some variations. Using the same seed, we pass -the argument -v0.1 (or --variant_amount), which generates a series of -variations each differing by a variation amount of 0.2. This number -ranges from 0 to 1.0, with higher numbers being larger amounts of +--- + +## Step 2 - Generating Variations + +Let's try to generate some variations. Using the same seed, we pass the argument `-v0.1` (or --variant_amount), which generates a series of +variations each differing by a variation amount of 0.2. This number ranges from `0` to `1.0`, with higher numbers being larger amounts of variation. -~~~ +``` dream> "prompt" -n6 -S3357757885 -v0.2 ... Outputs: @@ -54,45 +56,36 @@ Outputs: ./outputs/Xena/000002.4116285959.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 4116285959:0.2 -S3357757885 ./outputs/Xena/000002.1614299449.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 1614299449:0.2 -S3357757885 ./outputs/Xena/000002.1335553075.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 1335553075:0.2 -S3357757885 -~~~ +``` -Note that the output for each image has a -V option giving the -"variant subseed" for that image, consisting of a seed followed by the +### **Variation Sub Seeding** + +Note that the output for each image has a `-V` option giving the "variant subseed" for that image, consisting of a seed followed by the variation amount used to generate it. -This gives us a series of closely-related variations, including the -two shown here. +This gives us a series of closely-related variations, including the two shown here. - - + + +I like the expression on Xena's face in the first one (subseed 3647897225), and the armor on her shoulder in the second one (subseed 1614299449). Can we combine them to get the best of both worlds? -I like the expression on Xena's face in the first one (subseed -3647897225), and the armor on her shoulder in the second one (subseed -1614299449). Can we combine them to get the best of both worlds? - -We combine the two variations using -V (--with_variations). Again, we -must provide the seed for the originally-chosen image in order for +We combine the two variations using `-V` (--with_variations). Again, we must provide the seed for the originally-chosen image in order for this to work. 
-~~~ +``` dream> "prompt" -S3357757885 -V3647897225,0.1;1614299449,0.1 Outputs: ./outputs/Xena/000003.1614299449.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1 -S3357757885 -~~~ +``` -Here we are providing equal weights (0.1 and 0.1) for both the -subseeds. The resulting image is close, but not exactly what I -wanted: +Here we are providing equal weights (0.1 and 0.1) for both the subseeds. The resulting image is close, but not exactly what I wanted: - + -We could either try combining the images with different weights, or we -can generate more variations around the almost-but-not-quite image. We -do the latter, using both the -V (combining) and -v (variation -strength) options. Note that we use -n6 to generate 6 variations: +We could either try combining the images with different weights, or we can generate more variations around the almost-but-not-quite image. We do the latter, using both the `-V` (combining) and `-v` (variation strength) options. Note that we use `-n6` to generate 6 variations: -~~~~ +``` dream> "prompt" -S3357757885 -V3647897225,0.1;1614299449,0.1 -v0.05 -n6 Outputs: ./outputs/Xena/000004.3279757577.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,3279757577:0.05 -S3357757885 @@ -101,13 +94,11 @@ Outputs: ./outputs/Xena/000004.2664260391.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,2664260391:0.05 -S3357757885 ./outputs/Xena/000004.1642517170.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,1642517170:0.05 -S3357757885 ./outputs/Xena/000004.2183375608.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,2183375608:0.05 -S3357757885 -~~~~ +``` -This produces six images, all slight variations on the combination of -the chosen two images. Here's the one I like best: +This produces six images, all slight variations on the combination of the chosen two images. Here's the one I like best: - + -As you can see, this is a very powerful too, which when combined with -subprompt weighting, gives you great control over the content and +As you can see, this is a very powerful tool, which when combined with subprompt weighting, gives you great control over the content and quality of your generated images. diff --git a/docs/features/WEB.md b/docs/features/WEB.md new file mode 100644 index 0000000000..9eb6aa5bfc --- /dev/null +++ b/docs/features/WEB.md @@ -0,0 +1,13 @@ +# Barebones Web Server + +As of version 1.10, this distribution comes with a bare bones web server (see screenshot). To use it, run the `dream.py` script by adding the `**--web**` option. + +``` +(ldm) ~/stable-diffusion$ python3 scripts/dream.py --web +``` + +You can then connect to the server by pointing your web browser at http://localhost:9090, or to the network name or IP address of the server. + +Kudos to [Tesseract Cat](https://github.com/TesseractCat) for contributing this code, and to [dagf2101](https://github.com/dagf2101) for refining it. + +![Dream Web Server](../assets/dream_web_server.png) diff --git a/docs/help/TROUBLESHOOT.md b/docs/help/TROUBLESHOOT.md new file mode 100644 index 0000000000..cac5dddf23 --- /dev/null +++ b/docs/help/TROUBLESHOOT.md @@ -0,0 +1,68 @@ +# **Frequently Asked Questions** + +Here are a few common installation problems and their solutions. Often these are caused by incomplete installations or crashes during the +install process. + +--- + +**QUESTION** + +During `conda env create -f environment.yaml`, conda hangs indefinitely. 
+ +**SOLUTION** + +Enter the stable-diffusion directory and completely remove the `src` directory and all its contents. The safest way to do this is to enter the stable-diffusion directory and give the command `git clean -f`. If this still doesn't fix the problem, try "conda clean -all" and then restart at the `conda env create` step. + +--- + +**QUESTION** + +`dream.py` crashes with the complaint that it can't find `ldm.simplet2i.py`. Or it complains that function is being passed incorrect parameters. + +**SOLUTION** + +Reinstall the stable diffusion modules. Enter the `stable-diffusion` directory and give the command `pip install -e .` + +--- + +**QUESTION** + +`dream.py` dies, complaining of various missing modules, none of which starts with `ldm``. + +**SOLUTION** + +From within the `stable-diffusion` directory, run `conda env update -f environment.yaml` This is also frequently the solution to +complaints about an unknown function in a module. + +--- + +**QUESTION** + +There's a feature or bugfix in the Stable Diffusion GitHub that you want to try out. + +**SOLUTION** + +**Main Branch** + +If the fix/feature is on the `main` branch, enter the stable-diffusion directory and do a `git pull`. + +Usually this will be sufficient, but if you start to see errors about missing or incorrect modules, use the command `pip install -e .` and/or `conda env update -f environment.yaml` (These commands won't break anything.) + +**Sub Branch** + +If the feature/fix is on a branch (e.g. "_foo-bugfix_"), the recipe is similar, but do a `git pull `. + +**Not Committed** + +If the feature/fix is in a pull request that has not yet been made part of the main branch or a feature/bugfix branch, then from the page for the desired pull request, look for the line at the top that reads "_xxxx wants to merge xx commits into lstein:main from YYYYYY_". Copy the URL in YYYY. It should have the format `https://github.com//stable-diffusion/tree/` + +Then **go to the directory above stable-diffusion** and rename the directory to "_stable-diffusion.lstein_", "_stable-diffusion.old_", or anything else. You can then git clone the branch that contains the pull request: + +``` +git clone https://github.com//stable-diffusion/tree/ +``` + +You will need to go through the install procedure again, but it should be fast because all the dependencies are already loaded. + +--- diff --git a/docs/installation/INSTALL_LINUX.md b/docs/installation/INSTALL_LINUX.md new file mode 100644 index 0000000000..b7a6cd8ff0 --- /dev/null +++ b/docs/installation/INSTALL_LINUX.md @@ -0,0 +1,89 @@ +# **Linux Installation** + +1. You will need to install the following prerequisites if they are not already available. Use your operating system's preferred installer + +- Python (version 3.8.5 recommended; higher may work) +- git + +2. Install the Python Anaconda environment manager. + +``` +~$ wget https://repo.anaconda.com/archive/Anaconda3-2022.05-Linux-x86_64.sh +~$ chmod +x Anaconda3-2022.05-Linux-x86_64.sh +~$ ./Anaconda3-2022.05-Linux-x86_64.sh +``` + +After installing anaconda, you should log out of your system and log back in. If the installation +worked, your command prompt will be prefixed by the name of the current anaconda environment - `(base)`. + +3. Copy the stable-diffusion source code from GitHub: + +``` +(base) ~$ git clone https://github.com/lstein/stable-diffusion.git +``` + +This will create stable-diffusion folder where you will follow the rest of the steps. + +4. Enter the newly-created stable-diffusion folder. 
From this step forward make sure that you are working in the stable-diffusion directory! + +``` +(base) ~$ cd stable-diffusion +(base) ~/stable-diffusion$ +``` + +5. Use anaconda to copy necessary python packages, create a new python environment named `ldm` and activate the environment. + +``` +(base) ~/stable-diffusion$ conda env create -f environment.yaml +(base) ~/stable-diffusion$ conda activate ldm +(ldm) ~/stable-diffusion$ +``` + +After these steps, your command prompt will be prefixed by `(ldm)` as shown above. + +6. Load a couple of small machine-learning models required by stable diffusion: + +``` +(ldm) ~/stable-diffusion$ python3 scripts/preload_models.py +``` + +Note that this step is necessary because I modified the original just-in-time model loading scheme to allow the script to work on GPU machines that are not internet connected. See [Preload Models](../features/OTHER.md#preload-models) + +7. Now you need to install the weights for the stable diffusion model. + +- For running with the released weights, you will first need to set up an acount with Hugging Face (https://huggingface.co). +- Use your credentials to log in, and then point your browser at https://huggingface.co/CompVis/stable-diffusion-v-1-4-original. +- You may be asked to sign a license agreement at this point. +- Click on "Files and versions" near the top of the page, and then click on the file named "sd-v1-4.ckpt". You'll be taken to a page that prompts you to click the "download" link. Save the file somewhere safe on your local machine. + +Now run the following commands from within the stable-diffusion directory. This will create a symbolic link from the stable-diffusion model.ckpt file, to the true location of the sd-v1-4.ckpt file. + +``` +(ldm) ~/stable-diffusion$ mkdir -p models/ldm/stable-diffusion-v1 +(ldm) ~/stable-diffusion$ ln -sf /path/to/sd-v1-4.ckpt models/ldm/stable-diffusion-v1/model.ckpt +``` + +8. Start generating images! + +``` +# for the pre-release weights use the -l or --liaon400m switch +(ldm) ~/stable-diffusion$ python3 scripts/dream.py -l + +# for the post-release weights do not use the switch +(ldm) ~/stable-diffusion$ python3 scripts/dream.py + +# for additional configuration switches and arguments, use -h or --help +(ldm) ~/stable-diffusion$ python3 scripts/dream.py -h +``` + +9. Subsequently, to relaunch the script, be sure to run "conda activate ldm" (step 5, second command), enter the `stable-diffusion` directory, and then launch the dream script (step 8). If you forget to activate the ldm environment, the script will fail with multiple `ModuleNotFound` errors. + +### Updating to newer versions of the script + +This distribution is changing rapidly. If you used the `git clone` method (step 5) to download the stable-diffusion directory, then to update to the latest and greatest version, launch the Anaconda window, enter `stable-diffusion` and type: + +``` +(ldm) ~/stable-diffusion$ git pull +``` + +This will bring your local copy into sync with the remote one. 
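+
+After an install or update, a quick optional sanity check (this snippet
+is not part of the repository) is to confirm from within the `ldm`
+environment that the weights symlink from step 7 resolves and that
+PyTorch can see your GPU:
+
+```python
+import os
+import torch
+
+# Both lines should print True on a working setup; the checkpoint path is the
+# symlink created by the `ln -sf` command in step 7.
+print(os.path.exists("models/ldm/stable-diffusion-v1/model.ckpt"))
+print(torch.cuda.is_available())
+```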
diff --git a/README-Mac-MPS.md b/docs/installation/INSTALL_MAC.md similarity index 70% rename from README-Mac-MPS.md rename to docs/installation/INSTALL_MAC.md index 99d3181f2e..369aa877a5 100644 --- a/README-Mac-MPS.md +++ b/docs/installation/INSTALL_MAC.md @@ -1,21 +1,20 @@ -# macOS Instructions +# **macOS Instructions** Requirements - macOS 12.3 Monterey or later - Python - Patience -- Apple Silicon* +- Apple Silicon\* -*I haven't tested any of this on Intel Macs but I have read that one person got -it to work, so Apple Silicon might not be requried. +\*I haven't tested any of this on Intel Macs but I have read that one person got it to work, so Apple Silicon might not be requried. -Things have moved really fast and so these instructions change often and are -often out-of-date. One of the problems is that there are so many different ways to -run this. +Things have moved really fast and so these instructions change often +and are often out-of-date. One of the problems is that there are so +many different ways to run this. -We are trying to build a testing setup so that when we make changes it doesn't -always break. +We are trying to build a testing setup so that when we make changes it +doesn't always break. How to (this hasn't been 100% tested yet): @@ -23,7 +22,7 @@ First get the weights checkpoint download started - it's big: 1. Sign up at https://huggingface.co 2. Go to the [Stable diffusion diffusion model page](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original) -3. Accept the terms and click Access Repository: +3. Accept the terms and click Access Repository: 4. Download [sd-v1-4.ckpt (4.27 GB)](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/blob/main/sd-v1-4.ckpt) and note where you have saved it (probably the Downloads folder) While that is downloading, open Terminal and run the following commands one at a time. @@ -38,9 +37,8 @@ While that is downloading, open Terminal and run the following commands one at a # 2. No pyenv # # If you don't know what we are talking about, choose 2. -# # NOW EITHER DO -# 1. Installing alongside pyenv +# 1. Installing alongside pyenv brew install pyenv-virtualenv # you might have this from before, no problem pyenv install anaconda3-2022.05 @@ -48,7 +46,7 @@ pyenv virtualenv anaconda3-2022.05 eval "$(pyenv init -)" pyenv activate anaconda3-2022.05 -# OR, +# OR, # 2. Installing standalone # install python 3, git, cmake, protobuf: brew install cmake protobuf rust @@ -93,42 +91,37 @@ The original scripts should work as well. python scripts/orig_scripts/txt2img.py --prompt "a photograph of an astronaut riding a horse" --plms ``` -Note, `export PIP_EXISTS_ACTION=w` is a precaution to fix `conda env create -f environment-mac.yaml` -never finishing in some situations. So it isn't required but wont hurt. +Note, `export PIP_EXISTS_ACTION=w` is a precaution to fix `conda env +create -f environment-mac.yaml` never finishing in some situations. So +it isn't required but wont hurt. -After you follow all the instructions and run dream.py you might get several -errors. Here's the errors I've seen and found solutions for. +After you follow all the instructions and run dream.py you might get several errors. Here's the errors I've seen and found solutions for. ### Is it slow? Be sure to specify 1 sample and 1 iteration. 
- python ./scripts/orig_scripts/txt2img.py --prompt "ocean" --ddim_steps 5 --n_samples 1 --n_iter 1 + python ./scripts/orig_scripts/txt2img.py --prompt "ocean" --ddim_steps 5 --n_samples 1 --n_iter 1 ### Doesn't work anymore? -PyTorch nightly includes support for MPS. Because of this, this setup is -inherently unstable. One morning I woke up and it no longer worked no matter -what I did until I switched to miniforge. However, I have another Mac that works -just fine with Anaconda. If you can't get it to work, please search a little -first because many of the errors will get posted and solved. If you can't find -a solution please [create an issue](https://github.com/lstein/stable-diffusion/issues). +PyTorch nightly includes support for MPS. Because of this, this setup is inherently unstable. One morning I woke up and it no longer worked no matter what I did until I switched to miniforge. However, I have another Mac that works just fine with Anaconda. If you can't get it to work, please search a little first because many of the errors will get posted and solved. If you can't find a solution please [create an issue](https://github.com/lstein/stable-diffusion/issues). One debugging step is to update to the latest version of PyTorch nightly. - conda install pytorch torchvision torchaudio -c pytorch-nightly + conda install pytorch torchvision torchaudio -c pytorch-nightly If `conda env create -f environment-mac.yaml` takes forever run this. - git clean -f + git clean -f And run this. - conda clean --yes --all + conda clean --yes --all Or you could reset Anaconda. - conda update --force-reinstall -y -n base -c defaults conda + conda update --force-reinstall -y -n base -c defaults conda ### "No module named cv2", torch, 'ldm', 'transformers', 'taming', etc. @@ -145,17 +138,14 @@ The cause of this error is long so it's below. Third, if it says you're missing taming you need to rebuild your virtual environment. - conda env remove -n ldm - conda env create -f environment-mac.yaml + conda env remove -n ldm + conda env create -f environment-mac.yaml -Fourth, If you have activated the ldm virtual environment and tried rebuilding -it, maybe the problem could be that I have something installed that -you don't and you'll just need to manually install it. Make sure you -activate the virtual environment so it installs there instead of +Fourth, If you have activated the ldm virtual environment and tried rebuilding it, maybe the problem could be that I have something installed that you don't and you'll just need to manually install it. Make sure you activate the virtual environment so it installs there instead of globally. - conda activate ldm - pip install *name* + conda activate ldm + pip install *name* You might also need to install Rust (I mention this again below). @@ -167,8 +157,8 @@ picking the wrong one. More specifically, preload_models.py and dream.py says to find the first `python3` in the path environment variable. You can see which one it is picking with `which python3`. These are the mostly likely paths you'll see. - % which python3 - /usr/bin/python3 + % which python3 + /usr/bin/python3 The above path is part of the OS. However, that path is a stub that asks you if you want to install Xcode. If you have Xcode installed already, @@ -176,14 +166,14 @@ you want to install Xcode. If you have Xcode installed already, /Applications/Xcode.app/Contents/Developer/usr/bin/python3 (depending on which Xcode you've selected with `xcode-select`). 
- % which python3 - /opt/homebrew/bin/python3 + % which python3 + /opt/homebrew/bin/python3 If you installed python3 with Homebrew and you've modified your path to search for Homebrew binaries before system ones, you'll see the above path. - % which python - /opt/anaconda3/bin/python + % which python + /opt/anaconda3/bin/python If you drop the "3" you get an entirely different python. Note: starting in macOS 12.3, /usr/bin/python no longer exists (it was python 2 anyway). @@ -191,8 +181,8 @@ macOS 12.3, /usr/bin/python no longer exists (it was python 2 anyway). If you have Anaconda installed, this is what you'll see. There is a /opt/anaconda3/bin/python3 also. - (ldm) % which python - /Users/name/miniforge3/envs/ldm/bin/python + (ldm) % which python + /Users/name/miniforge3/envs/ldm/bin/python This is what you'll see if you have miniforge and you've correctly activated the ldm environment. This is the goal. @@ -216,11 +206,11 @@ Tired of waiting for your renders to finish before you can see if it works? Reduce the steps! The image quality will be horrible but at least you'll get quick feedback. - python ./scripts/txt2img.py --prompt "ocean" --ddim_steps 5 --n_samples 1 --n_iter 1 + python ./scripts/txt2img.py --prompt "ocean" --ddim_steps 5 --n_samples 1 --n_iter 1 ### OSError: Can't load tokenizer for 'openai/clip-vit-large-patch14'... - python scripts/preload_models.py + python scripts/preload_models.py ### "The operator [name] is not current implemented for the MPS device." (sic) @@ -237,16 +227,16 @@ The lstein branch includes this fix in [environment-mac.yaml](https://github.com I have not seen this error because I had Rust installed on my computer before I started playing with Stable Diffusion. The fix is to install Rust. - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh ### How come `--seed` doesn't work? First this: > Completely reproducible results are not guaranteed across PyTorch -releases, individual commits, or different platforms. Furthermore, -results may not be reproducible between CPU and GPU executions, even -when using identical seeds. +> releases, individual commits, or different platforms. Furthermore, +> results may not be reproducible between CPU and GPU executions, even +> when using identical seeds. [PyTorch docs](https://pytorch.org/docs/stable/notes/randomness.html) @@ -255,7 +245,7 @@ still working on it. ### libiomp5.dylib error? - OMP: Error #15: Initializing libiomp5.dylib, but found libomp.dylib already initialized. + OMP: Error #15: Initializing libiomp5.dylib, but found libomp.dylib already initialized. You are likely using an Intel package by mistake. Be sure to run conda with the environment variable `CONDA_SUBDIR=osx-arm64`, like so: @@ -267,7 +257,7 @@ a dependency. [nomkl](https://stackoverflow.com/questions/66224879/what-is-the-n is a metapackage designed to prevent this, by making it impossible to install `mkl`, but if your environment is already broken it may not work. -Do *not* use `os.environ['KMP_DUPLICATE_LIB_OK']='True'` or equivalents as this +Do _not_ use `os.environ['KMP_DUPLICATE_LIB_OK']='True'` or equivalents as this masks the underlying issue of using Intel packages. ### Not enough memory. @@ -282,7 +272,7 @@ affect the quality of the images though. See [this issue](https://github.com/CompVis/stable-diffusion/issues/71). 
-### "Error: product of dimension sizes > 2**31'" +### "Error: product of dimension sizes > 2\*\*31'" This error happens with img2img, which I haven't played with too much yet. But I know it's because your image is too big or the resolution @@ -292,7 +282,7 @@ output size (which is the default). However, if you're using that size and you get the above error, try 256 x 256 or 512 x 256 or something as the source image. -BTW, 2**31-1 = [2,147,483,647](https://en.wikipedia.org/wiki/2,147,483,647#In_computing), which is also 32-bit signed [LONG_MAX](https://en.wikipedia.org/wiki/C_data_types) in C. +BTW, 2\*\*31-1 = [2,147,483,647](https://en.wikipedia.org/wiki/2,147,483,647#In_computing), which is also 32-bit signed [LONG_MAX](https://en.wikipedia.org/wiki/C_data_types) in C. ### I just got Rickrolled! Do I have a virus? @@ -332,13 +322,30 @@ change instead. This is a 32-bit vs 16-bit problem. What? Intel? On an Apple Silicon? - Intel MKL FATAL ERROR: This system does not meet the minimum requirements for use of the Intel(R) Math Kernel Library. - The processor must support the Intel(R) Supplemental Streaming SIMD Extensions 3 (Intel(R) SSSE3) instructions. - The processor must support the Intel(R) Streaming SIMD Extensions 4.2 (Intel(R) SSE4.2) instructions. - The processor must support the Intel(R) Advanced Vector Extensions (Intel(R) AVX) instructions. + Intel MKL FATAL ERROR: This system does not meet the minimum requirements for use of the Intel(R) Math Kernel Library. + The processor must support the Intel(R) Supplemental Streaming SIMD Extensions 3 (Intel(R) SSSE3) instructions. + The processor must support the Intel(R) Streaming SIMD Extensions 4.2 (Intel(R) SSE4.2) instructions. + The processor must support the Intel(R) Advanced Vector Extensions (Intel(R) AVX) instructions. This is due to the Intel `mkl` package getting picked up when you try to install something that depends on it-- Rosetta can translate some Intel instructions but not the specialized ones here. To avoid this, make sure to use the environment variable `CONDA_SUBDIR=osx-arm64`, which restricts the Conda environment to only use ARM packages, and use `nomkl` as described above. + +### input types 'tensor<2x1280xf32>' and 'tensor<\*xf16>' are not broadcast compatible + +May appear when just starting to generate, e.g.: + +``` +dream> clouds +Generating: 0%| | 0/1 [00:00' and 'tensor<*xf16>' are not broadcast compatible +LLVM ERROR: Failed to infer result type(s). +Abort trap: 6 +/Users/[...]/opt/anaconda3/envs/ldm/lib/python3.9/multiprocessing/resource_tracker.py:216: UserWarning: resource_tracker: There appear to be 1 leaked semaphore objects to clean up at shutdown + warnings.warn('resource_tracker: There appear to be %d ' +``` + +Macs do not support autocast/mixed-precision. Supply `--full_precision` to use float32 everywhere. diff --git a/docs/installation/INSTALL_WINDOWS.md b/docs/installation/INSTALL_WINDOWS.md new file mode 100644 index 0000000000..238988a15a --- /dev/null +++ b/docs/installation/INSTALL_WINDOWS.md @@ -0,0 +1,112 @@ +# **Windows Installation** + +## **Notebook install (semi-automated)** + +We have a [Jupyter +notebook](https://github.com/lstein/stable-diffusion/blob/main/notebooks/Stable-Diffusion-local-Windows.ipynb) +with cell-by-cell installation steps. 
It will download the code in +this repo as one of the steps, so instead of cloning this repo, simply +download the notebook from the link above and load it up in VSCode +(with the appropriate extensions installed)/Jupyter/JupyterLab and +start running the cells one-by-one. + +Note that you will need NVIDIA drivers, Python 3.10, and Git installed +beforehand - simplified [step-by-step +instructions](https://github.com/lstein/stable-diffusion/wiki/Easy-peasy-Windows-install) +are available in the wiki (you'll only need steps 1, 2, & 3 ). + +## **Manual Install** + +### **pip** + +See [Easy-peasy Windows install](https://github.com/lstein/stable-diffusion/wiki/Easy-peasy-Windows-install) +in the wiki + +### **Conda** + +1. Install Anaconda3 (miniconda3 version) from here: https://docs.anaconda.com/anaconda/install/windows/ + +2. Install Git from here: https://git-scm.com/download/win + +3. Launch Anaconda from the Windows Start menu. This will bring up a command window. Type all the remaining commands in this window. + +4. Run the command: + +``` +git clone https://github.com/lstein/stable-diffusion.git +``` + +This will create stable-diffusion folder where you will follow the rest of the steps. + +5. Enter the newly-created stable-diffusion folder. From this step forward make sure that you are working in the stable-diffusion directory! + +``` +cd stable-diffusion +``` + +6. Run the following two commands: + +``` +conda env create -f environment.yaml (step 6a) +conda activate ldm (step 6b) +``` + +This will install all python requirements and activate the "ldm" +environment which sets PATH and other environment variables properly. + +7. Run the command: + +``` +python scripts\preload_models.py +``` + +This installs several machine learning models that stable diffusion requires. + +Note: This step is required. This was done because some users may might be blocked by firewalls or have limited internet connectivity for the models to be downloaded just-in-time. + +8. Now you need to install the weights for the big stable diffusion model. + +- For running with the released weights, you will first need to set up an acount with Hugging Face (https://huggingface.co). +- Use your credentials to log in, and then point your browser at https://huggingface.co/CompVis/stable-diffusion-v-1-4-original. +- You may be asked to sign a license agreement at this point. +- Click on "Files and versions" near the top of the page, and then click on the file named `sd-v1-4.ckpt`. You'll be taken to a page that + prompts you to click the "download" link. Now save the file somewhere safe on your local machine. +- The weight file is >4 GB in size, so + downloading may take a while. + +Now run the following commands from **within the stable-diffusion directory** to copy the weights file to the right place: + +``` +mkdir -p models\ldm\stable-diffusion-v1 +copy C:\path\to\sd-v1-4.ckpt models\ldm\stable-diffusion-v1\model.ckpt +``` + +Please replace `C:\path\to\sd-v1.4.ckpt` with the correct path to wherever you stashed this file. If you prefer not to copy or move the .ckpt file, +you may instead create a shortcut to it from within `models\ldm\stable-diffusion-v1\`. + +9. Start generating images! + +``` +# for the pre-release weights +python scripts\dream.py -l + +# for the post-release weights +python scripts\dream.py +``` + +10. 
Subsequently, to relaunch the script, first activate the Anaconda command window (step 3),enter the stable-diffusion directory (step 5, `cd \path\to\stable-diffusion`), run `conda activate ldm` (step 6b), and then launch the dream script (step 9). + +**Note:** Tildebyte has written an alternative ["Easy peasy Windows +install"](https://github.com/lstein/stable-diffusion/wiki/Easy-peasy-Windows-install) +which uses the Windows Powershell and pew. If you are having trouble with Anaconda on Windows, give this a try (or try it first!) + +### Updating to newer versions of the script + +This distribution is changing rapidly. If you used the `git clone` method (step 5) to download the stable-diffusion directory, then to update to the latest and greatest version, launch the Anaconda window, enter `stable-diffusion`, and type: + +``` +git pull +conda env update -f environment.yaml +``` + +This will bring your local copy into sync with the remote one. diff --git a/environment-mac.yaml b/environment-mac.yaml index 44cd1efcd6..8067c712ee 100644 --- a/environment-mac.yaml +++ b/environment-mac.yaml @@ -1,33 +1,29 @@ name: ldm channels: - - pytorch-nightly + - pytorch - conda-forge dependencies: - - python==3.9.13 + - python==3.10.5 - pip==22.2.2 - # pytorch-nightly, left unpinned + # pytorch left unpinned - pytorch - - torchmetrics - torchvision # I suggest to keep the other deps sorted for convenience. - # If you wish to upgrade to 3.10, try to run this: + # To determine what the latest versions should be, run: # # ```shell - # CONDA_CMD=conda - # sed -E 's/python==3.9.13/python==3.10.5/;s/ldm/ldm-3.10/;21,99s/- ([^=]+)==.+/- \1/' environment-mac.yaml > /tmp/environment-mac-updated.yml - # CONDA_SUBDIR=osx-arm64 $CONDA_CMD env create -f /tmp/environment-mac-updated.yml && $CONDA_CMD list -n ldm-3.10 | awk ' {print " - " $1 "==" $2;} ' + # sed -E 's/ldm/ldm-updated/;20,99s/- ([^=]+)==.+/- \1/' environment-mac.yaml > environment-mac-updated.yml + # CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac-updated.yml && conda list -n ldm-updated | awk ' {print " - " $1 "==" $2;} ' # ``` - # - # Unfortunately, as of 2022-08-31, this fails at the pip stage. - albumentations==1.2.1 - coloredlogs==15.0.1 - einops==0.4.1 - grpcio==1.46.4 - - humanfriendly - - imageio-ffmpeg==0.4.7 + - humanfriendly==10.0 - imageio==2.21.2 + - imageio-ffmpeg==0.4.7 - imgaug==0.4.0 - kornia==0.6.7 - mpmath==1.2.1 @@ -36,23 +32,23 @@ dependencies: - omegaconf==2.1.1 - onnx==1.12.0 - onnxruntime==1.12.1 - - opencv==4.6.0 - pudb==2022.1 - pytorch-lightning==1.6.5 - scipy==1.9.1 - streamlit==1.12.2 - sympy==1.10.1 - tensorboard==2.9.0 - - transformers==4.21.2 + - torchmetrics==0.9.3 - pip: - - invisible-watermark - - test-tube - - tokenizers - - torch-fidelity - - -e git+https://github.com/huggingface/diffusers.git@v0.2.4#egg=diffusers + - opencv-python==4.6.0 + - realesrgan==0.2.5.0 + - test-tube==0.7.5 + - transformers==4.21.2 + - torch-fidelity==0.3.0 - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers - -e git+https://github.com/openai/CLIP.git@main#egg=clip - -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion + - -e git+https://github.com/lstein/GFPGAN@fix-dark-cast-images#egg=gfpgan - -e . 
variables: PYTORCH_ENABLE_MPS_FALLBACK: 1 diff --git a/environment.yaml b/environment.yaml index 7d5b4fe9e3..db754a9185 100644 --- a/environment.yaml +++ b/environment.yaml @@ -1,4 +1,4 @@ -name: ldm +name: sd-ldm channels: - pytorch - defaults @@ -11,12 +11,13 @@ dependencies: - numpy=1.19.2 - pip: - albumentations==0.4.3 - - opencv-python==4.1.2.30 + - opencv-python==4.5.5.64 - pudb==2019.2 - imageio==2.9.0 - imageio-ffmpeg==0.4.2 - pytorch-lightning==1.4.2 - omegaconf==2.1.1 + - realesrgan==0.2.5.0 - test-tube>=0.7.5 - streamlit==1.12.0 - pillow==9.2.0 @@ -28,4 +29,5 @@ dependencies: - -e git+https://github.com/openai/CLIP.git@main#egg=clip - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers - -e git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion + - -e git+https://github.com/lstein/GFPGAN@fix-dark-cast-images#egg=gfpgan - -e . diff --git a/ldm/dream/conditioning.py b/ldm/dream/conditioning.py new file mode 100644 index 0000000000..dfa108985a --- /dev/null +++ b/ldm/dream/conditioning.py @@ -0,0 +1,96 @@ +''' +This module handles the generation of the conditioning tensors, including management of +weighted subprompts. + +Useful function exports: + +get_uc_and_c() get the conditioned and unconditioned latent +split_weighted_subpromopts() split subprompts, normalize and weight them +log_tokenization() print out colour-coded tokens and warn if truncated + +''' +import re +import torch + +def get_uc_and_c(prompt, model, log_tokens=False, skip_normalize=False): + uc = model.get_learned_conditioning(['']) + + # get weighted sub-prompts + weighted_subprompts = split_weighted_subprompts( + prompt, skip_normalize + ) + + if len(weighted_subprompts) > 1: + # i dont know if this is correct.. but it works + c = torch.zeros_like(uc) + # normalize each "sub prompt" and add it + for subprompt, weight in weighted_subprompts: + log_tokenization(subprompt, model, log_tokens) + c = torch.add( + c, + model.get_learned_conditioning([subprompt]), + alpha=weight, + ) + else: # just standard 1 prompt + log_tokenization(prompt, model, log_tokens) + c = model.get_learned_conditioning([prompt]) + return (uc, c) + +def split_weighted_subprompts(text, skip_normalize=False)->list: + """ + grabs all text up to the first occurrence of ':' + uses the grabbed text as a sub-prompt, and takes the value following ':' as weight + if ':' has no value defined, defaults to 1.0 + repeats until no text remaining + """ + prompt_parser = re.compile(""" + (?P # capture group for 'prompt' + (?:\\\:|[^:])+ # match one or more non ':' characters or escaped colons '\:' + ) # end 'prompt' + (?: # non-capture group + :+ # match one or more ':' characters + (?P # capture group for 'weight' + -?\d+(?:\.\d+)? # match positive or negative integer or decimal number + )? # end weight capture group, make optional + \s* # strip spaces after weight + | # OR + $ # else, if no ':' then match end of line + ) # end non-capture group + """, re.VERBOSE) + parsed_prompts = [(match.group("prompt").replace("\\:", ":"), float( + match.group("weight") or 1)) for match in re.finditer(prompt_parser, text)] + if skip_normalize: + return parsed_prompts + weight_sum = sum(map(lambda x: x[1], parsed_prompts)) + if weight_sum == 0: + print( + "Warning: Subprompt weights add up to zero. 
Discarding and using even weights instead.") + equal_weight = 1 / len(parsed_prompts) + return [(x[0], equal_weight) for x in parsed_prompts] + return [(x[0], x[1] / weight_sum) for x in parsed_prompts] + +# shows how the prompt is tokenized +# usually tokens have '' to indicate end-of-word, +# but for readability it has been replaced with ' ' +def log_tokenization(text, model, log=False): + if not log: + return + tokens = model.cond_stage_model.tokenizer._tokenize(text) + tokenized = "" + discarded = "" + usedTokens = 0 + totalTokens = len(tokens) + for i in range(0, totalTokens): + token = tokens[i].replace('', ' ') + # alternate color + s = (usedTokens % 6) + 1 + if i < model.cond_stage_model.max_length: + tokenized = tokenized + f"\x1b[0;3{s};40m{token}" + usedTokens += 1 + else: # over max token length + discarded = discarded + f"\x1b[0;3{s};40m{token}" + print(f"\n>> Tokens ({usedTokens}):\n{tokenized}\x1b[0m") + if discarded != "": + print( + f">> Tokens Discarded ({totalTokens-usedTokens}):\n{discarded}\x1b[0m" + ) diff --git a/ldm/dream/devices.py b/ldm/dream/devices.py index 7a205f6616..90bc9e97dd 100644 --- a/ldm/dream/devices.py +++ b/ldm/dream/devices.py @@ -1,4 +1,6 @@ import torch +from torch import autocast +from contextlib import contextmanager, nullcontext def choose_torch_device() -> str: '''Convenience routine for guessing which GPU device to run model on''' @@ -8,10 +10,11 @@ def choose_torch_device() -> str: return 'mps' return 'cpu' -def choose_autocast_device(device) -> str: +def choose_autocast_device(device): '''Returns an autocast compatible device from a torch device''' device_type = device.type # this returns 'mps' on M1 # autocast only supports cuda or cpu - if device_type not in ('cuda','cpu'): - return 'cpu' - return device_type + if device_type in ('cuda','cpu'): + return device_type,autocast + else: + return 'cpu',nullcontext diff --git a/ldm/dream/generator/__init__.py b/ldm/dream/generator/__init__.py new file mode 100644 index 0000000000..b48e6e19c8 --- /dev/null +++ b/ldm/dream/generator/__init__.py @@ -0,0 +1,4 @@ +''' +Initialization file for the ldm.dream.generator package +''' +from .base import Generator diff --git a/ldm/dream/generator/base.py b/ldm/dream/generator/base.py new file mode 100644 index 0000000000..9bed3df719 --- /dev/null +++ b/ldm/dream/generator/base.py @@ -0,0 +1,158 @@ +''' +Base class for ldm.dream.generator.* +including img2img, txt2img, and inpaint +''' +import torch +import numpy as np +import random +from tqdm import tqdm, trange +from PIL import Image +from einops import rearrange, repeat +from pytorch_lightning import seed_everything +from ldm.dream.devices import choose_autocast_device + +downsampling = 8 + +class Generator(): + def __init__(self,model): + self.model = model + self.seed = None + self.latent_channels = model.channels + self.downsampling_factor = downsampling # BUG: should come from model or config + self.variation_amount = 0 + self.with_variations = [] + + # this is going to be overridden in img2img.py, txt2img.py and inpaint.py + def get_make_image(self,prompt,**kwargs): + """ + Returns a function returning an image derived from the prompt and the initial image + Return value depends on the seed at the time you call it + """ + raise NotImplementedError("image_iterator() must be implemented in a descendent class") + + def set_variation(self, seed, variation_amount, with_variations): + self.seed = seed + self.variation_amount = variation_amount + self.with_variations = with_variations + + def 
generate(self,prompt,init_image,width,height,iterations=1,seed=None, + image_callback=None, step_callback=None, + **kwargs): + device_type,scope = choose_autocast_device(self.model.device) + make_image = self.get_make_image( + prompt, + init_image = init_image, + width = width, + height = height, + step_callback = step_callback, + **kwargs + ) + + results = [] + seed = seed if seed else self.new_seed() + seed, initial_noise = self.generate_initial_noise(seed, width, height) + with scope(device_type), self.model.ema_scope(): + for n in trange(iterations, desc='Generating'): + x_T = None + if self.variation_amount > 0: + seed_everything(seed) + target_noise = self.get_noise(width,height) + x_T = self.slerp(self.variation_amount, initial_noise, target_noise) + elif initial_noise is not None: + # i.e. we specified particular variations + x_T = initial_noise + else: + seed_everything(seed) + if self.model.device.type == 'mps': + x_T = self.get_noise(width,height) + + # make_image will do the equivalent of get_noise itself + image = make_image(x_T) + results.append([image, seed]) + if image_callback is not None: + image_callback(image, seed) + seed = self.new_seed() + return results + + def sample_to_image(self,samples): + """ + Returns a function returning an image derived from the prompt and the initial image + Return value depends on the seed at the time you call it + """ + x_samples = self.model.decode_first_stage(samples) + x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0) + if len(x_samples) != 1: + raise Exception( + f'>> expected to get a single image, but got {len(x_samples)}') + x_sample = 255.0 * rearrange( + x_samples[0].cpu().numpy(), 'c h w -> h w c' + ) + return Image.fromarray(x_sample.astype(np.uint8)) + + def generate_initial_noise(self, seed, width, height): + initial_noise = None + if self.variation_amount > 0 or len(self.with_variations) > 0: + # use fixed initial noise plus random noise per iteration + seed_everything(seed) + initial_noise = self.get_noise(width,height) + for v_seed, v_weight in self.with_variations: + seed = v_seed + seed_everything(seed) + next_noise = self.get_noise(width,height) + initial_noise = self.slerp(v_weight, initial_noise, next_noise) + if self.variation_amount > 0: + random.seed() # reset RNG to an actually random state, so we can get a random seed for variations + seed = random.randrange(0,np.iinfo(np.uint32).max) + return (seed, initial_noise) + else: + return (seed, None) + + # returns a tensor filled with random numbers from a normal distribution + def get_noise(self,width,height): + """ + Returns a tensor filled with random numbers, either form a normal distribution + (txt2img) or from the latent image (img2img, inpaint) + """ + raise NotImplementedError("get_noise() must be implemented in a descendent class") + + def new_seed(self): + self.seed = random.randrange(0, np.iinfo(np.uint32).max) + return self.seed + + def slerp(self, t, v0, v1, DOT_THRESHOLD=0.9995): + ''' + Spherical linear interpolation + Args: + t (float/np.ndarray): Float value between 0.0 and 1.0 + v0 (np.ndarray): Starting vector + v1 (np.ndarray): Final vector + DOT_THRESHOLD (float): Threshold for considering the two vectors as + colineal. Not recommended to alter this. 
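+            (when |dot| exceeds this threshold the two vectors are treated as
+            parallel and plain linear interpolation is used instead)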
+ Returns: + v2 (np.ndarray): Interpolation vector between v0 and v1 + ''' + inputs_are_torch = False + if not isinstance(v0, np.ndarray): + inputs_are_torch = True + v0 = v0.detach().cpu().numpy() + if not isinstance(v1, np.ndarray): + inputs_are_torch = True + v1 = v1.detach().cpu().numpy() + + dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1))) + if np.abs(dot) > DOT_THRESHOLD: + v2 = (1 - t) * v0 + t * v1 + else: + theta_0 = np.arccos(dot) + sin_theta_0 = np.sin(theta_0) + theta_t = theta_0 * t + sin_theta_t = np.sin(theta_t) + s0 = np.sin(theta_0 - theta_t) / sin_theta_0 + s1 = sin_theta_t / sin_theta_0 + v2 = s0 * v0 + s1 * v1 + + if inputs_are_torch: + v2 = torch.from_numpy(v2).to(self.model.device) + + return v2 + diff --git a/ldm/dream/generator/img2img.py b/ldm/dream/generator/img2img.py new file mode 100644 index 0000000000..242912d0eb --- /dev/null +++ b/ldm/dream/generator/img2img.py @@ -0,0 +1,72 @@ +''' +ldm.dream.generator.txt2img descends from ldm.dream.generator +''' + +import torch +import numpy as np +from ldm.dream.devices import choose_autocast_device +from ldm.dream.generator.base import Generator +from ldm.models.diffusion.ddim import DDIMSampler + +class Img2Img(Generator): + def __init__(self,model): + super().__init__(model) + self.init_latent = None # by get_noise() + + @torch.no_grad() + def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, + conditioning,init_image,strength,step_callback=None,**kwargs): + """ + Returns a function returning an image derived from the prompt and the initial image + Return value depends on the seed at the time you call it. + """ + + # PLMS sampler not supported yet, so ignore previous sampler + if not isinstance(sampler,DDIMSampler): + print( + f">> sampler '{sampler.__class__.__name__}' is not yet supported. 
Using DDIM sampler" + ) + sampler = DDIMSampler(self.model, device=self.model.device) + + sampler.make_schedule( + ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False + ) + + device_type,scope = choose_autocast_device(self.model.device) + with scope(device_type): + self.init_latent = self.model.get_first_stage_encoding( + self.model.encode_first_stage(init_image) + ) # move to latent space + + t_enc = int(strength * steps) + uc, c = conditioning + + @torch.no_grad() + def make_image(x_T): + # encode (scaled latent) + z_enc = sampler.stochastic_encode( + self.init_latent, + torch.tensor([t_enc]).to(self.model.device), + noise=x_T + ) + # decode it + samples = sampler.decode( + z_enc, + c, + t_enc, + img_callback = step_callback, + unconditional_guidance_scale=cfg_scale, + unconditional_conditioning=uc, + ) + return self.sample_to_image(samples) + + return make_image + + def get_noise(self,width,height): + device = self.model.device + init_latent = self.init_latent + assert init_latent is not None,'call to get_noise() when init_latent not set' + if device.type == 'mps': + return torch.randn_like(init_latent, device='cpu').to(device) + else: + return torch.randn_like(init_latent, device=device) diff --git a/ldm/dream/generator/inpaint.py b/ldm/dream/generator/inpaint.py new file mode 100644 index 0000000000..1b25a658b4 --- /dev/null +++ b/ldm/dream/generator/inpaint.py @@ -0,0 +1,77 @@ +''' +ldm.dream.generator.inpaint descends from ldm.dream.generator +''' + +import torch +import numpy as np +from einops import rearrange, repeat +from ldm.dream.devices import choose_autocast_device +from ldm.dream.generator.img2img import Img2Img +from ldm.models.diffusion.ddim import DDIMSampler + +class Inpaint(Img2Img): + def __init__(self,model): + self.init_latent = None + super().__init__(model) + + @torch.no_grad() + def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, + conditioning,init_image,mask_image,strength, + step_callback=None,**kwargs): + """ + Returns a function returning an image derived from the prompt and + the initial image + mask. Return value depends on the seed at + the time you call it. kwargs are 'init_latent' and 'strength' + """ + + mask_image = mask_image[0][0].unsqueeze(0).repeat(4,1,1).unsqueeze(0) + mask_image = repeat(mask_image, '1 ... -> b ...', b=1) + + # PLMS sampler not supported yet, so ignore previous sampler + if not isinstance(sampler,DDIMSampler): + print( + f">> sampler '{sampler.__class__.__name__}' is not yet supported. 
Using DDIM sampler" + ) + sampler = DDIMSampler(self.model, device=self.model.device) + + sampler.make_schedule( + ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False + ) + + device_type,scope = choose_autocast_device(self.model.device) + with scope(device_type): + self.init_latent = self.model.get_first_stage_encoding( + self.model.encode_first_stage(init_image) + ) # move to latent space + + t_enc = int(strength * steps) + uc, c = conditioning + + print(f">> target t_enc is {t_enc} steps") + + @torch.no_grad() + def make_image(x_T): + # encode (scaled latent) + z_enc = sampler.stochastic_encode( + self.init_latent, + torch.tensor([t_enc]).to(self.model.device), + noise=x_T + ) + + # decode it + samples = sampler.decode( + z_enc, + c, + t_enc, + img_callback = step_callback, + unconditional_guidance_scale = cfg_scale, + unconditional_conditioning = uc, + mask = mask_image, + init_latent = self.init_latent + ) + return self.sample_to_image(samples) + + return make_image + + + diff --git a/ldm/dream/generator/txt2img.py b/ldm/dream/generator/txt2img.py new file mode 100644 index 0000000000..d4cd25cb51 --- /dev/null +++ b/ldm/dream/generator/txt2img.py @@ -0,0 +1,61 @@ +''' +ldm.dream.generator.txt2img inherits from ldm.dream.generator +''' + +import torch +import numpy as np +from ldm.dream.generator.base import Generator + +class Txt2Img(Generator): + def __init__(self,model): + super().__init__(model) + + @torch.no_grad() + def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta, + conditioning,width,height,step_callback=None,**kwargs): + """ + Returns a function returning an image derived from the prompt and the initial image + Return value depends on the seed at the time you call it + kwargs are 'width' and 'height' + """ + uc, c = conditioning + + @torch.no_grad() + def make_image(x_T): + shape = [ + self.latent_channels, + height // self.downsampling_factor, + width // self.downsampling_factor, + ] + samples, _ = sampler.sample( + batch_size = 1, + S = steps, + x_T = x_T, + conditioning = c, + shape = shape, + verbose = False, + unconditional_guidance_scale = cfg_scale, + unconditional_conditioning = uc, + eta = ddim_eta, + img_callback = step_callback + ) + return self.sample_to_image(samples) + + return make_image + + + # returns a tensor filled with random numbers from a normal distribution + def get_noise(self,width,height): + device = self.model.device + if device.type == 'mps': + return torch.randn([1, + self.latent_channels, + height // self.downsampling_factor, + width // self.downsampling_factor], + device='cpu').to(device) + else: + return torch.randn([1, + self.latent_channels, + height // self.downsampling_factor, + width // self.downsampling_factor], + device=device) diff --git a/ldm/dream/pngwriter.py b/ldm/dream/pngwriter.py index 2461486b22..3f3a15891b 100644 --- a/ldm/dream/pngwriter.py +++ b/ldm/dream/pngwriter.py @@ -59,6 +59,10 @@ class PromptFormatter: switches.append(f'-H{opt.height or t2i.height}') switches.append(f'-C{opt.cfg_scale or t2i.cfg_scale}') switches.append(f'-A{opt.sampler_name or t2i.sampler_name}') +# to do: put model name into the t2i object +# switches.append(f'--model{t2i.model_name}') + if opt.seamless or t2i.seamless: + switches.append(f'--seamless') if opt.init_img: switches.append(f'-I{opt.init_img}') if opt.fit: @@ -74,6 +78,4 @@ class PromptFormatter: if opt.with_variations: formatted_variations = ','.join(f'{seed}:{weight}' for seed, weight in opt.with_variations) switches.append(f'-V{formatted_variations}') - if 
t2i.full_precision: - switches.append('-F') return ' '.join(switches) diff --git a/ldm/dream/readline.py b/ldm/dream/readline.py index 24a4493ad9..2aa8520acf 100644 --- a/ldm/dream/readline.py +++ b/ldm/dream/readline.py @@ -22,7 +22,7 @@ class Completer: def complete(self, text, state): buffer = readline.get_line_buffer() - if text.startswith(('-I', '--init_img')): + if text.startswith(('-I', '--init_img','-M','--init_mask')): return self._path_completions(text, state, ('.png','.jpg','.jpeg')) if buffer.strip().endswith('cd') or text.startswith(('.', '/')): @@ -48,10 +48,15 @@ class Completer: def _path_completions(self, text, state, extensions): # get the path so far + # TODO: replace this mess with a regular expression match if text.startswith('-I'): path = text.replace('-I', '', 1).lstrip() elif text.startswith('--init_img='): path = text.replace('--init_img=', '', 1).lstrip() + elif text.startswith('--init_mask='): + path = text.replace('--init_mask=', '', 1).lstrip() + elif text.startswith('-M'): + path = text.replace('-M', '', 1).lstrip() else: path = text @@ -94,6 +99,7 @@ if readline_available: '--grid','-g', '--individual','-i', '--init_img','-I', + '--init_mask','-M', '--strength','-f', '--variants','-v', '--outdir','-o', diff --git a/ldm/dream/server.py b/ldm/dream/server.py index f592457e4c..19414f65d6 100644 --- a/ldm/dream/server.py +++ b/ldm/dream/server.py @@ -1,16 +1,65 @@ +import argparse import json import base64 import mimetypes import os from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer -from ldm.dream.pngwriter import PngWriter +from ldm.dream.pngwriter import PngWriter, PromptFormatter from threading import Event +def build_opt(post_data, seed, gfpgan_model_exists): + opt = argparse.Namespace() + setattr(opt, 'prompt', post_data['prompt']) + setattr(opt, 'init_img', post_data['initimg']) + setattr(opt, 'strength', float(post_data['strength'])) + setattr(opt, 'iterations', int(post_data['iterations'])) + setattr(opt, 'steps', int(post_data['steps'])) + setattr(opt, 'width', int(post_data['width'])) + setattr(opt, 'height', int(post_data['height'])) + setattr(opt, 'seamless', 'seamless' in post_data) + setattr(opt, 'fit', 'fit' in post_data) + setattr(opt, 'mask', 'mask' in post_data) + setattr(opt, 'invert_mask', 'invert_mask' in post_data) + setattr(opt, 'cfg_scale', float(post_data['cfg_scale'])) + setattr(opt, 'sampler_name', post_data['sampler_name']) + setattr(opt, 'gfpgan_strength', float(post_data['gfpgan_strength']) if gfpgan_model_exists else 0) + setattr(opt, 'upscale', [int(post_data['upscale_level']), float(post_data['upscale_strength'])] if post_data['upscale_level'] != '' else None) + setattr(opt, 'progress_images', 'progress_images' in post_data) + setattr(opt, 'seed', None if int(post_data['seed']) == -1 else int(post_data['seed'])) + setattr(opt, 'variation_amount', float(post_data['variation_amount']) if int(post_data['seed']) != -1 else 0) + setattr(opt, 'with_variations', []) + + broken = False + if int(post_data['seed']) != -1 and post_data['with_variations'] != '': + for part in post_data['with_variations'].split(','): + seed_and_weight = part.split(':') + if len(seed_and_weight) != 2: + print(f'could not parse with_variation part "{part}"') + broken = True + break + try: + seed = int(seed_and_weight[0]) + weight = float(seed_and_weight[1]) + except ValueError: + print(f'could not parse with_variation part "{part}"') + broken = True + break + opt.with_variations.append([seed, weight]) + + if broken: + raise CanceledException + 
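+    # with_variations arrives from the web form as 'seed:weight,seed:weight'
+    # (for example '12345:0.6,54321:0.4'); an empty list means no variations
+    # were requested, which is normalized to None below.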
+ if len(opt.with_variations) == 0: + opt.with_variations = None + + return opt + class CanceledException(Exception): pass class DreamServer(BaseHTTPRequestHandler): model = None + outdir = None canceled = Event() def do_GET(self): @@ -30,6 +79,23 @@ class DreamServer(BaseHTTPRequestHandler): 'gfpgan_model_exists': gfpgan_model_exists } self.wfile.write(bytes("let config = " + json.dumps(config) + ";\n", "utf-8")) + elif self.path == "/run_log.json": + self.send_response(200) + self.send_header("Content-type", "application/json") + self.end_headers() + output = [] + + log_file = os.path.join(self.outdir, "dream_web_log.txt") + if os.path.exists(log_file): + with open(log_file, "r") as log: + for line in log: + url, config = line.split(": {", maxsplit=1) + config = json.loads("{" + config) + config["url"] = url.lstrip(".") + if os.path.exists(url): + output.append(config) + + self.wfile.write(bytes(json.dumps({"run_log": output}), "utf-8")) elif self.path == "/cancel": self.canceled.set() self.send_response(200) @@ -63,34 +129,19 @@ class DreamServer(BaseHTTPRequestHandler): content_length = int(self.headers['Content-Length']) post_data = json.loads(self.rfile.read(content_length)) - prompt = post_data['prompt'] - initimg = post_data['initimg'] - strength = float(post_data['strength']) - iterations = int(post_data['iterations']) - steps = int(post_data['steps']) - width = int(post_data['width']) - height = int(post_data['height']) - fit = 'fit' in post_data - cfgscale = float(post_data['cfgscale']) - sampler_name = post_data['sampler'] - gfpgan_strength = float(post_data['gfpgan_strength']) if gfpgan_model_exists else 0 - upscale_level = post_data['upscale_level'] - upscale_strength = post_data['upscale_strength'] - upscale = [int(upscale_level),float(upscale_strength)] if upscale_level != '' else None - progress_images = 'progress_images' in post_data - seed = self.model.seed if int(post_data['seed']) == -1 else int(post_data['seed']) + opt = build_opt(post_data, self.model.seed, gfpgan_model_exists) self.canceled.clear() - print(f">> Request to generate with prompt: {prompt}") + print(f">> Request to generate with prompt: {opt.prompt}") # In order to handle upscaled images, the PngWriter needs to maintain state # across images generated by each call to prompt2img(), so we define it in # the outer scope of image_done() config = post_data.copy() # Shallow copy - config['initimg'] = '' + config['initimg'] = config.pop('initimg_name', '') images_generated = 0 # helps keep track of when upscaling is started images_upscaled = 0 # helps keep track of when upscaling is completed - pngwriter = PngWriter("./outputs/img-samples/") + pngwriter = PngWriter(self.outdir) prefix = pngwriter.unique_prefix() # if upscaling is requested, then this will be called twice, once when @@ -99,11 +150,24 @@ class DreamServer(BaseHTTPRequestHandler): # entry should not be inserted into the image list. 
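        # image_done saves the finished PNG (embedding a reproducible prompt
        # string in its metadata), appends an entry to dream_web_log.txt, and
        # streams a JSON event for the new image back to the browser.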
def image_done(image, seed, upscaled=False): name = f'{prefix}.{seed}.png' - path = pngwriter.save_image_and_prompt_to_png(image, f'{prompt} -S{seed}', name) + iter_opt = argparse.Namespace(**vars(opt)) # copy + if opt.variation_amount > 0: + this_variation = [[seed, opt.variation_amount]] + if opt.with_variations is None: + iter_opt.with_variations = this_variation + else: + iter_opt.with_variations = opt.with_variations + this_variation + iter_opt.variation_amount = 0 + elif opt.with_variations is None: + iter_opt.seed = seed + normalized_prompt = PromptFormatter(self.model, iter_opt).normalize_prompt() + path = pngwriter.save_image_and_prompt_to_png(image, f'{normalized_prompt} -S{iter_opt.seed}', name) + if int(config['seed']) == -1: + config['seed'] = seed # Append post_data to log, but only once! if not upscaled: - with open("./outputs/img-samples/dream_web_log.txt", "a") as log: + with open(os.path.join(self.outdir, "dream_web_log.txt"), "a") as log: log.write(f"{path}: {json.dumps(config)}\n") self.wfile.write(bytes(json.dumps( @@ -111,27 +175,27 @@ class DreamServer(BaseHTTPRequestHandler): ) + '\n',"utf-8")) # control state of the "postprocessing..." message - upscaling_requested = upscale or gfpgan_strength>0 + upscaling_requested = opt.upscale or opt.gfpgan_strength > 0 nonlocal images_generated # NB: Is this bad python style? It is typical usage in a perl closure. nonlocal images_upscaled # NB: Is this bad python style? It is typical usage in a perl closure. if upscaled: images_upscaled += 1 else: - images_generated +=1 + images_generated += 1 if upscaling_requested: action = None - if images_generated >= iterations: - if images_upscaled < iterations: + if images_generated >= opt.iterations: + if images_upscaled < opt.iterations: action = 'upscaling-started' else: action = 'upscaling-done' if action: - x = images_upscaled+1 + x = images_upscaled + 1 self.wfile.write(bytes(json.dumps( - {'event':action,'processed_file_cnt':f'{x}/{iterations}'} + {'event': action, 'processed_file_cnt': f'{x}/{opt.iterations}'} ) + '\n',"utf-8")) - step_writer = PngWriter('./outputs/intermediates/') + step_writer = PngWriter(os.path.join(self.outdir, "intermediates")) step_index = 1 def image_progress(sample, step): if self.canceled.is_set(): @@ -141,10 +205,10 @@ class DreamServer(BaseHTTPRequestHandler): # since rendering images is moderately expensive, only render every 5th image # and don't bother with the last one, since it'll render anyway nonlocal step_index - if progress_images and step % 5 == 0 and step < steps - 1: - image = self.model._sample_to_image(sample) - name = f'{prefix}.{seed}.{step_index}.png' - metadata = f'{prompt} -S{seed} [intermediate]' + if opt.progress_images and step % 5 == 0 and step < opt.steps - 1: + image = self.model.sample_to_image(sample) + name = f'{prefix}.{opt.seed}.{step_index}.png' + metadata = f'{opt.prompt} -S{opt.seed} [intermediate]' path = step_writer.save_image_and_prompt_to_png(image, metadata, name) step_index += 1 self.wfile.write(bytes(json.dumps( @@ -152,43 +216,20 @@ class DreamServer(BaseHTTPRequestHandler): ) + '\n',"utf-8")) try: - if initimg is None: + if opt.init_img is None: # Run txt2img - self.model.prompt2image(prompt, - iterations=iterations, - cfg_scale = cfgscale, - width = width, - height = height, - seed = seed, - steps = steps, - gfpgan_strength = gfpgan_strength, - upscale = upscale, - sampler_name = sampler_name, - step_callback=image_progress, - image_callback=image_done) + self.model.prompt2image(**vars(opt), 
step_callback=image_progress, image_callback=image_done) else: # Decode initimg as base64 to temp file with open("./img2img-tmp.png", "wb") as f: - initimg = initimg.split(",")[1] # Ignore mime type + initimg = opt.init_img.split(",")[1] # Ignore mime type f.write(base64.b64decode(initimg)) + opt1 = argparse.Namespace(**vars(opt)) + opt1.init_img = "./img2img-tmp.png" try: # Run img2img - self.model.prompt2image(prompt, - init_img = "./img2img-tmp.png", - strength = strength, - iterations = iterations, - cfg_scale = cfgscale, - seed = seed, - steps = steps, - sampler_name = sampler_name, - width = width, - height = height, - fit = fit, - gfpgan_strength=gfpgan_strength, - upscale = upscale, - step_callback=image_progress, - image_callback=image_done) + self.model.prompt2image(**vars(opt1), step_callback=image_progress, image_callback=image_done) finally: # Remove the temp file os.remove("./img2img-tmp.png") diff --git a/ldm/generate.py b/ldm/generate.py new file mode 100644 index 0000000000..8f67403633 --- /dev/null +++ b/ldm/generate.py @@ -0,0 +1,695 @@ +# Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein) + +# Derived from source code carrying the following copyrights +# Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich +# Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors + +import torch +import numpy as np +import random +import os +import time +import re +import sys +import traceback +import transformers + +from omegaconf import OmegaConf +from PIL import Image, ImageOps +from torch import nn +from pytorch_lightning import seed_everything + +from ldm.util import instantiate_from_config +from ldm.models.diffusion.ddim import DDIMSampler +from ldm.models.diffusion.plms import PLMSSampler +from ldm.models.diffusion.ksampler import KSampler +from ldm.dream.pngwriter import PngWriter +from ldm.dream.image_util import InitImageResizer +from ldm.dream.devices import choose_torch_device +from ldm.dream.conditioning import get_uc_and_c + +"""Simplified text to image API for stable diffusion/latent diffusion + +Example Usage: + +from ldm.generate import Generate + +# Create an object with default values +gr = Generate() + +# do the slow model initialization +gr.load_model() + +# Do the fast inference & image generation. Any options passed here +# override the default values assigned during class initialization +# Will call load_model() if the model was not previously loaded and so +# may be slow at first. +# The method returns a list of images. Each row of the list is a sub-list of [filename,seed] +results = gr.prompt2png(prompt = "an astronaut riding a horse", + outdir = "./outputs/samples", + iterations = 3) + +for row in results: + print(f'filename={row[0]}') + print(f'seed ={row[1]}') + +# Same thing, but using an initial image. 
+results = gr.prompt2png(prompt = "an astronaut riding a horse", + outdir = "./outputs/, + iterations = 3, + init_img = "./sketches/horse+rider.png") + +for row in results: + print(f'filename={row[0]}') + print(f'seed ={row[1]}') + +# Same thing, but we return a series of Image objects, which lets you manipulate them, +# combine them, and save them under arbitrary names + +results = gr.prompt2image(prompt = "an astronaut riding a horse" + outdir = "./outputs/") +for row in results: + im = row[0] + seed = row[1] + im.save(f'./outputs/samples/an_astronaut_riding_a_horse-{seed}.png') + im.thumbnail(100,100).save('./outputs/samples/astronaut_thumb.jpg') + +Note that the old txt2img() and img2img() calls are deprecated but will +still work. + +The full list of arguments to Generate() are: +gr = Generate( + weights = path to model weights ('models/ldm/stable-diffusion-v1/model.ckpt') + config = path to model configuraiton ('configs/stable-diffusion/v1-inference.yaml') + iterations = // how many times to run the sampling (1) + steps = // 50 + seed = // current system time + sampler_name= ['ddim', 'k_dpm_2_a', 'k_dpm_2', 'k_euler_a', 'k_euler', 'k_heun', 'k_lms', 'plms'] // k_lms + grid = // false + width = // image width, multiple of 64 (512) + height = // image height, multiple of 64 (512) + cfg_scale = // condition-free guidance scale (7.5) + ) + +""" + + +class Generate: + """Generate class + Stores default values for multiple configuration items + """ + + def __init__( + self, + iterations = 1, + steps = 50, + cfg_scale = 7.5, + weights = 'models/ldm/stable-diffusion-v1/model.ckpt', + config = 'configs/stable-diffusion/v1-inference.yaml', + grid = False, + width = 512, + height = 512, + sampler_name = 'k_lms', + ddim_eta = 0.0, # deterministic + precision = 'autocast', + full_precision = False, + strength = 0.75, # default in scripts/img2img.py + seamless = False, + embedding_path = None, + device_type = 'cuda', + ignore_ctrl_c = False, + ): + self.iterations = iterations + self.width = width + self.height = height + self.steps = steps + self.cfg_scale = cfg_scale + self.weights = weights + self.config = config + self.sampler_name = sampler_name + self.grid = grid + self.ddim_eta = ddim_eta + self.precision = precision + self.full_precision = True if choose_torch_device() == 'mps' else full_precision + self.strength = strength + self.seamless = seamless + self.embedding_path = embedding_path + self.device_type = device_type + self.ignore_ctrl_c = ignore_ctrl_c # note, this logic probably doesn't belong here... + self.model = None # empty for now + self.sampler = None + self.device = None + self.generators = {} + self.base_generator = None + self.seed = None + + if device_type == 'cuda' and not torch.cuda.is_available(): + device_type = choose_torch_device() + print(">> cuda not available, using device", device_type) + self.device = torch.device(device_type) + + # for VRAM usage statistics + device_type = choose_torch_device() + self.session_peakmem = torch.cuda.max_memory_allocated() if device_type == 'cuda' else None + transformers.logging.set_verbosity_error() + + def prompt2png(self, prompt, outdir, **kwargs): + """ + Takes a prompt and an output directory, writes out the requested number + of PNG files, and returns an array of [[filename,seed],[filename,seed]...] 
+ Optional named arguments are the same as those passed to Generate and prompt2image() + """ + results = self.prompt2image(prompt, **kwargs) + pngwriter = PngWriter(outdir) + prefix = pngwriter.unique_prefix() + outputs = [] + for image, seed in results: + name = f'{prefix}.{seed}.png' + path = pngwriter.save_image_and_prompt_to_png( + image, f'{prompt} -S{seed}', name) + outputs.append([path, seed]) + return outputs + + def txt2img(self, prompt, **kwargs): + outdir = kwargs.pop('outdir', 'outputs/img-samples') + return self.prompt2png(prompt, outdir, **kwargs) + + def img2img(self, prompt, **kwargs): + outdir = kwargs.pop('outdir', 'outputs/img-samples') + assert ( + 'init_img' in kwargs + ), 'call to img2img() must include the init_img argument' + return self.prompt2png(prompt, outdir, **kwargs) + + def prompt2image( + self, + # these are common + prompt, + iterations = None, + steps = None, + seed = None, + cfg_scale = None, + ddim_eta = None, + skip_normalize = False, + image_callback = None, + step_callback = None, + width = None, + height = None, + sampler_name = None, + seamless = False, + log_tokenization= False, + with_variations = None, + variation_amount = 0.0, + # these are specific to img2img and inpaint + init_img = None, + init_mask = None, + fit = False, + strength = None, + # these are specific to GFPGAN/ESRGAN + gfpgan_strength= 0, + save_original = False, + upscale = None, + **args, + ): # eat up additional cruft + """ + ldm.generate.prompt2image() is the common entry point for txt2img() and img2img() + It takes the following arguments: + prompt // prompt string (no default) + iterations // iterations (1); image count=iterations + steps // refinement steps per iteration + seed // seed for random number generator + width // width of image, in multiples of 64 (512) + height // height of image, in multiples of 64 (512) + cfg_scale // how strongly the prompt influences the image (7.5) (must be >1) + seamless // whether the generated image should tile + init_img // path to an initial image + strength // strength for noising/unnoising init_img. 0.0 preserves image exactly, 1.0 replaces it completely + gfpgan_strength // strength for GFPGAN. 0.0 preserves image exactly, 1.0 replaces it completely + ddim_eta // image randomness (eta=0.0 means the same seed always produces the same image) + step_callback // a function or method that will be called each step + image_callback // a function or method that will be called each time an image is generated + with_variations // a weighted list [(seed_1, weight_1), (seed_2, weight_2), ...] of variations which should be applied before doing any generation + variation_amount // optional 0-1 value to slerp from -S noise to random noise (allows variations on an image) + + To use the step callback, define a function that receives two arguments: + - Image GPU data + - The step number + + To use the image callback, define a function of method that receives two arguments, an Image object + and the seed. You can then do whatever you like with the image, including converting it to + different formats and manipulating it. For example: + + def process_image(image,seed): + image.save(f{'images/seed.png'}) + + The callback used by the prompt2png() can be found in ldm/dream_util.py. It contains code + to create the requested output directory, select a unique informative name for each image, and + write the prompt into the PNG metadata. 
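+
+        A minimal, purely illustrative pair of callbacks (hypothetical names),
+        showing the expected signatures:
+
+            def on_step(gpu_data, step):
+                print(f'>> denoising step {step}')
+
+            def on_image(image, seed):
+                image.save(f'outputs/{seed}.png')
+
+            gr.prompt2image('an astronaut riding a horse',
+                            step_callback  = on_step,
+                            image_callback = on_image)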
+ """ + # TODO: convert this into a getattr() loop + steps = steps or self.steps + width = width or self.width + height = height or self.height + seamless = seamless or self.seamless + cfg_scale = cfg_scale or self.cfg_scale + ddim_eta = ddim_eta or self.ddim_eta + iterations = iterations or self.iterations + strength = strength or self.strength + self.seed = seed + self.log_tokenization = log_tokenization + with_variations = [] if with_variations is None else with_variations + + model = ( + self.load_model() + ) # will instantiate the model or return it from cache + + for m in model.modules(): + if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): + m.padding_mode = 'circular' if seamless else m._orig_padding_mode + + assert cfg_scale > 1.0, 'CFG_Scale (-C) must be >1.0' + assert ( + 0.0 < strength < 1.0 + ), 'img2img and inpaint strength can only work with 0.0 < strength < 1.0' + assert ( + 0.0 <= variation_amount <= 1.0 + ), '-v --variation_amount must be in [0.0, 1.0]' + + # check this logic - doesn't look right + if len(with_variations) > 0 or variation_amount > 1.0: + assert seed is not None,\ + 'seed must be specified when using with_variations' + if variation_amount == 0.0: + assert iterations == 1,\ + 'when using --with_variations, multiple iterations are only possible when using --variation_amount' + assert all(0 <= weight <= 1 for _, weight in with_variations),\ + f'variation weights must be in [0.0, 1.0]: got {[weight for _, weight in with_variations]}' + + width, height, _ = self._resolution_check(width, height, log=True) + + if sampler_name and (sampler_name != self.sampler_name): + self.sampler_name = sampler_name + self._set_sampler() + + tic = time.time() + if torch.cuda.is_available(): + torch.cuda.reset_peak_memory_stats() + + results = list() + init_image = None + mask_image = None + + try: + uc, c = get_uc_and_c( + prompt, model=self.model, + skip_normalize=skip_normalize, + log_tokens=self.log_tokenization + ) + + (init_image,mask_image) = self._make_images(init_img,init_mask, width, height, fit) + + if (init_image is not None) and (mask_image is not None): + generator = self._make_inpaint() + elif init_image is not None: + generator = self._make_img2img() + else: + generator = self._make_txt2img() + + generator.set_variation(self.seed, variation_amount, with_variations) + results = generator.generate( + prompt, + iterations = iterations, + seed = self.seed, + sampler = self.sampler, + steps = steps, + cfg_scale = cfg_scale, + conditioning = (uc,c), + ddim_eta = ddim_eta, + image_callback = image_callback, # called after the final image is generated + step_callback = step_callback, # called after each intermediate image is generated + width = width, + height = height, + init_image = init_image, # notice that init_image is different from init_img + mask_image = mask_image, + strength = strength, + ) + + if upscale is not None or gfpgan_strength > 0: + self.upscale_and_reconstruct(results, + upscale = upscale, + strength = gfpgan_strength, + save_original = save_original, + image_callback = image_callback) + + except KeyboardInterrupt: + print('*interrupted*') + if not self.ignore_ctrl_c: + raise KeyboardInterrupt + print( + '>> Partial results will be returned; if --grid was requested, nothing will be returned.' 
+ ) + except RuntimeError as e: + print(traceback.format_exc(), file=sys.stderr) + print('>> Could not generate image.') + + toc = time.time() + print('>> Usage stats:') + print( + f'>> {len(results)} image(s) generated in', '%4.2fs' % (toc - tic) + ) + if torch.cuda.is_available() and self.device.type == 'cuda': + print( + f'>> Max VRAM used for this generation:', + '%4.2fG.' % (torch.cuda.max_memory_allocated() / 1e9), + 'Current VRAM utilization:' + '%4.2fG' % (torch.cuda.memory_allocated() / 1e9), + ) + + self.session_peakmem = max( + self.session_peakmem, torch.cuda.max_memory_allocated() + ) + print( + f'>> Max VRAM used since script start: ', + '%4.2fG' % (self.session_peakmem / 1e9), + ) + return results + + def _make_images(self, img_path, mask_path, width, height, fit=False): + init_image = None + init_mask = None + if not img_path: + return None,None + + image = self._load_img(img_path, width, height, fit=fit) # this returns an Image + init_image = self._create_init_image(image) # this returns a torch tensor + + if self._has_transparency(image) and not mask_path: # if image has a transparent area and no mask was provided, then try to generate mask + print('>> Initial image has transparent areas. Will inpaint in these regions.') + if self._check_for_erasure(image): + print( + '>> WARNING: Colors underneath the transparent region seem to have been erased.\n', + '>> Inpainting will be suboptimal. Please preserve the colors when making\n', + '>> a transparency mask, or provide mask explicitly using --init_mask (-M).' + ) + init_mask = self._create_init_mask(image) # this returns a torch tensor + + if mask_path: + mask_image = self._load_img(mask_path, width, height, fit=fit) # this returns an Image + init_mask = self._create_init_mask(mask_image) + + return init_image,init_mask + + def _make_img2img(self): + if not self.generators.get('img2img'): + from ldm.dream.generator.img2img import Img2Img + self.generators['img2img'] = Img2Img(self.model) + return self.generators['img2img'] + + def _make_txt2img(self): + if not self.generators.get('txt2img'): + from ldm.dream.generator.txt2img import Txt2Img + self.generators['txt2img'] = Txt2Img(self.model) + return self.generators['txt2img'] + + def _make_inpaint(self): + if not self.generators.get('inpaint'): + from ldm.dream.generator.inpaint import Inpaint + self.generators['inpaint'] = Inpaint(self.model) + return self.generators['inpaint'] + + def load_model(self): + """Load and initialize the model from configuration variables passed at object creation time""" + if self.model is None: + seed_everything(random.randrange(0, np.iinfo(np.uint32).max)) + try: + config = OmegaConf.load(self.config) + model = self._load_model_from_config(config, self.weights) + if self.embedding_path is not None: + model.embedding_manager.load( + self.embedding_path, self.full_precision + ) + self.model = model.to(self.device) + # model.to doesn't change the cond_stage_model.device used to move the tokenizer output, so set it here + self.model.cond_stage_model.device = self.device + except AttributeError as e: + print(f'>> Error loading model. 
{str(e)}', file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + raise SystemExit from e + + self._set_sampler() + + for m in self.model.modules(): + if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): + m._orig_padding_mode = m.padding_mode + + return self.model + + def upscale_and_reconstruct(self, + image_list, + upscale = None, + strength = 0.0, + save_original = False, + image_callback = None): + try: + if upscale is not None: + from ldm.gfpgan.gfpgan_tools import real_esrgan_upscale + if strength > 0: + from ldm.gfpgan.gfpgan_tools import run_gfpgan + except (ModuleNotFoundError, ImportError): + print(traceback.format_exc(), file=sys.stderr) + print('>> You may need to install the ESRGAN and/or GFPGAN modules') + return + + for r in image_list: + image, seed = r + try: + if upscale is not None: + if len(upscale) < 2: + upscale.append(0.75) + image = real_esrgan_upscale( + image, + upscale[1], + int(upscale[0]), + seed, + ) + if strength > 0: + image = run_gfpgan( + image, strength, seed, 1 + ) + except Exception as e: + print( + f'>> Error running RealESRGAN or GFPGAN. Your image was not upscaled.\n{e}' + ) + + if image_callback is not None: + image_callback(image, seed, upscaled=True) + else: + r[0] = image + + # to help WebGUI - front end to generator util function + def sample_to_image(self,samples): + return self._sample_to_image(samples) + + def _sample_to_image(self,samples): + if not self.base_generator: + from ldm.dream.generator import Generator + self.base_generator = Generator(self.model) + return self.base_generator.sample_to_image(samples) + + def _set_sampler(self): + msg = f'>> Setting Sampler to {self.sampler_name}' + if self.sampler_name == 'plms': + self.sampler = PLMSSampler(self.model, device=self.device) + elif self.sampler_name == 'ddim': + self.sampler = DDIMSampler(self.model, device=self.device) + elif self.sampler_name == 'k_dpm_2_a': + self.sampler = KSampler( + self.model, 'dpm_2_ancestral', device=self.device + ) + elif self.sampler_name == 'k_dpm_2': + self.sampler = KSampler(self.model, 'dpm_2', device=self.device) + elif self.sampler_name == 'k_euler_a': + self.sampler = KSampler( + self.model, 'euler_ancestral', device=self.device + ) + elif self.sampler_name == 'k_euler': + self.sampler = KSampler(self.model, 'euler', device=self.device) + elif self.sampler_name == 'k_heun': + self.sampler = KSampler(self.model, 'heun', device=self.device) + elif self.sampler_name == 'k_lms': + self.sampler = KSampler(self.model, 'lms', device=self.device) + else: + msg = f'>> Unsupported Sampler: {self.sampler_name}, Defaulting to plms' + self.sampler = PLMSSampler(self.model, device=self.device) + + print(msg) + + def _load_model_from_config(self, config, ckpt): + print(f'>> Loading model from {ckpt}') + + # for usage statistics + device_type = choose_torch_device() + if device_type == 'cuda': + torch.cuda.reset_peak_memory_stats() + tic = time.time() + + # this does the work + pl_sd = torch.load(ckpt, map_location='cpu') + sd = pl_sd['state_dict'] + model = instantiate_from_config(config.model) + m, u = model.load_state_dict(sd, strict=False) + + if self.full_precision: + print( + '>> Using slower but more accurate full-precision math (--full_precision)' + ) + else: + print( + '>> Using half precision math. Call with --full_precision to use more accurate but VRAM-intensive full precision.' 
+ ) + model.half() + model.to(self.device) + model.eval() + + # usage statistics + toc = time.time() + print( + f'>> Model loaded in', '%4.2fs' % (toc - tic) + ) + if device_type == 'cuda': + print( + '>> Max VRAM used to load the model:', + '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9), + '\n>> Current VRAM usage:' + '%4.2fG' % (torch.cuda.memory_allocated() / 1e9), + ) + + return model + + def _load_img(self, path, width, height, fit=False): + assert os.path.exists(path), f'>> {path}: File not found' + + # with Image.open(path) as img: + # image = img.convert('RGBA') + image = Image.open(path) + print( + f'>> loaded input image of size {image.width}x{image.height} from {path}' + ) + if fit: + image = self._fit_image(image,(width,height)) + else: + image = self._squeeze_image(image) + return image + + def _create_init_image(self,image): + image = image.convert('RGB') + # print( + # f'>> DEBUG: writing the image to img.png' + # ) + # image.save('img.png') + image = np.array(image).astype(np.float32) / 255.0 + image = image[None].transpose(0, 3, 1, 2) + image = torch.from_numpy(image) + image = 2.0 * image - 1.0 + return image.to(self.device) + + def _create_init_mask(self, image): + # convert into a black/white mask + image = self._image_to_mask(image) + image = image.convert('RGB') + # BUG: We need to use the model's downsample factor rather than hardcoding "8" + from ldm.dream.generator.base import downsampling + image = image.resize((image.width//downsampling, image.height//downsampling), resample=Image.Resampling.LANCZOS) + # print( + # f'>> DEBUG: writing the mask to mask.png' + # ) + # image.save('mask.png') + image = np.array(image) + image = image.astype(np.float32) / 255.0 + image = image[None].transpose(0, 3, 1, 2) + image = torch.from_numpy(image) + return image.to(self.device) + + # The mask is expected to have the region to be inpainted + # with alpha transparency. It converts it into a black/white + # image with the transparent part black. + def _image_to_mask(self, mask_image, invert=False) -> Image: + # Obtain the mask from the transparency channel + mask = Image.new(mode="L", size=mask_image.size, color=255) + mask.putdata(mask_image.getdata(band=3)) + if invert: + mask = ImageOps.invert(mask) + return mask + + def _has_transparency(self,image): + if image.info.get("transparency", None) is not None: + return True + if image.mode == "P": + transparent = image.info.get("transparency", -1) + for _, index in image.getcolors(): + if index == transparent: + return True + elif image.mode == "RGBA": + extrema = image.getextrema() + if extrema[3][0] < 255: + return True + return False + + + def _check_for_erasure(self,image): + width, height = image.size + pixdata = image.load() + colored = 0 + for y in range(height): + for x in range(width): + if pixdata[x, y][3] == 0: + r, g, b, _ = pixdata[x, y] + if (r, g, b) != (0, 0, 0) and \ + (r, g, b) != (255, 255, 255): + colored += 1 + return colored == 0 + + def _squeeze_image(self,image): + x,y,resize_needed = self._resolution_check(image.width,image.height) + if resize_needed: + return InitImageResizer(image).resize(x,y) + return image + + + def _fit_image(self,image,max_dimensions): + w,h = max_dimensions + print( + f'>> image will be resized to fit inside a box {w}x{h} in size.' 
+ ) + if image.width > image.height: + h = None # by setting h to none, we tell InitImageResizer to fit into the width and calculate height + elif image.height > image.width: + w = None # ditto for w + else: + pass + image = InitImageResizer(image).resize(w,h) # note that InitImageResizer does the multiple of 64 truncation internally + print( + f'>> after adjusting image dimensions to be multiples of 64, init image is {image.width}x{image.height}' + ) + return image + + def _resolution_check(self, width, height, log=False): + resize_needed = False + w, h = map( + lambda x: x - x % 64, (width, height) + ) # resize to integer multiple of 64 + if h != height or w != width: + if log: + print( + f'>> Provided width and height must be multiples of 64. Auto-resizing to {w}x{h}' + ) + height = h + width = w + resize_needed = True + + if (width * height) > (self.width * self.height): + print(">> This input is larger than your defaults. If you run out of memory, please use a smaller image.") + + return width, height, resize_needed + + diff --git a/ldm/gfpgan/gfpgan_tools.py b/ldm/gfpgan/gfpgan_tools.py index ff90a83360..f76790fc08 100644 --- a/ldm/gfpgan/gfpgan_tools.py +++ b/ldm/gfpgan/gfpgan_tools.py @@ -8,18 +8,17 @@ from PIL import Image from scripts.dream import create_argv_parser arg_parser = create_argv_parser() -opt = arg_parser.parse_args() - -model_path = os.path.join(opt.gfpgan_dir, opt.gfpgan_model_path) +opt = arg_parser.parse_args() +model_path = os.path.join(opt.gfpgan_dir, opt.gfpgan_model_path) gfpgan_model_exists = os.path.isfile(model_path) -def _run_gfpgan(image, strength, prompt, seed, upsampler_scale=4): - print(f'>> GFPGAN - Restoring Faces: {prompt} : seed:{seed}') +def run_gfpgan(image, strength, seed, upsampler_scale=4): + print(f'>> GFPGAN - Restoring Faces for image seed:{seed}') gfpgan = None with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=DeprecationWarning) warnings.filterwarnings('ignore', category=UserWarning) - + try: if not gfpgan_model_exists: raise Exception('GFPGAN model not found at path ' + model_path) @@ -46,7 +45,10 @@ def _run_gfpgan(image, strength, prompt, seed, upsampler_scale=4): if gfpgan is None: print( - f'>> GFPGAN not initialized, it must be loaded via the --gfpgan argument' + f'>> WARNING: GFPGAN not initialized.' + ) + print( + f'>> Download https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth to {model_path}, \nor change GFPGAN directory with --gfpgan_dir.' ) return image @@ -75,61 +77,59 @@ def _run_gfpgan(image, strength, prompt, seed, upsampler_scale=4): def _load_gfpgan_bg_upsampler(bg_upsampler, upsampler_scale, bg_tile=400): if bg_upsampler == 'realesrgan': - if not torch.cuda.is_available(): # CPU - warnings.warn( - 'The unoptimized RealESRGAN is slow on CPU. We do not use it. ' - 'If you really want to use it, please modify the corresponding codes.' 
- ) - bg_upsampler = None + if not torch.cuda.is_available(): # CPU or MPS on M1 + use_half_precision = False else: - model_path = { - 2: 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth', - 4: 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth', - } + use_half_precision = True - if upsampler_scale not in model_path: - return None + model_path = { + 2: 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth', + 4: 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth', + } - from basicsr.archs.rrdbnet_arch import RRDBNet - from realesrgan import RealESRGANer + if upsampler_scale not in model_path: + return None - if upsampler_scale == 4: - model = RRDBNet( - num_in_ch=3, - num_out_ch=3, - num_feat=64, - num_block=23, - num_grow_ch=32, - scale=4, - ) - if upsampler_scale == 2: - model = RRDBNet( - num_in_ch=3, - num_out_ch=3, - num_feat=64, - num_block=23, - num_grow_ch=32, - scale=2, - ) + from basicsr.archs.rrdbnet_arch import RRDBNet + from realesrgan import RealESRGANer - bg_upsampler = RealESRGANer( - scale=upsampler_scale, - model_path=model_path[upsampler_scale], - model=model, - tile=bg_tile, - tile_pad=10, - pre_pad=0, - half=True, - ) # need to set False in CPU mode + if upsampler_scale == 4: + model = RRDBNet( + num_in_ch=3, + num_out_ch=3, + num_feat=64, + num_block=23, + num_grow_ch=32, + scale=4, + ) + if upsampler_scale == 2: + model = RRDBNet( + num_in_ch=3, + num_out_ch=3, + num_feat=64, + num_block=23, + num_grow_ch=32, + scale=2, + ) + + bg_upsampler = RealESRGANer( + scale=upsampler_scale, + model_path=model_path[upsampler_scale], + model=model, + tile=bg_tile, + tile_pad=10, + pre_pad=0, + half=use_half_precision, + ) else: bg_upsampler = None return bg_upsampler -def real_esrgan_upscale(image, strength, upsampler_scale, prompt, seed): +def real_esrgan_upscale(image, strength, upsampler_scale, seed): print( - f'>> Real-ESRGAN Upscaling: {prompt} : seed:{seed} : scale:{upsampler_scale}x' + f'>> Real-ESRGAN Upscaling seed:{seed} : scale:{upsampler_scale}x' ) with warnings.catch_warnings(): diff --git a/ldm/models/diffusion/ddim.py b/ldm/models/diffusion/ddim.py index 672ba24dd1..3868540526 100644 --- a/ldm/models/diffusion/ddim.py +++ b/ldm/models/diffusion/ddim.py @@ -171,6 +171,7 @@ class DDIMSampler(object): ) return samples, intermediates + # This routine gets called from img2img @torch.no_grad() def ddim_sampling( self, @@ -270,6 +271,7 @@ class DDIMSampler(object): return img, intermediates + # This routine gets called from ddim_sampling() and decode() @torch.no_grad() def p_sample_ddim( self, @@ -372,14 +374,16 @@ class DDIMSampler(object): @torch.no_grad() def decode( - self, - x_latent, - cond, - t_start, - img_callback=None, - unconditional_guidance_scale=1.0, - unconditional_conditioning=None, - use_original_steps=False, + self, + x_latent, + cond, + t_start, + img_callback=None, + unconditional_guidance_scale=1.0, + unconditional_conditioning=None, + use_original_steps=False, + init_latent = None, + mask = None, ): timesteps = ( @@ -395,6 +399,8 @@ class DDIMSampler(object): iterator = tqdm(time_range, desc='Decoding image', total=total_steps) x_dec = x_latent + x0 = init_latent + for i, step in enumerate(iterator): index = total_steps - i - 1 ts = torch.full( @@ -403,6 +409,14 @@ class DDIMSampler(object): device=x_latent.device, dtype=torch.long, ) + + if mask is not None: + assert x0 is not None + xdec_orig = 
self.model.q_sample( + x0, ts + ) # TODO: deterministic forward pass? + x_dec = xdec_orig * mask + (1.0 - mask) * x_dec + x_dec, _ = self.p_sample_ddim( x_dec, cond, @@ -412,6 +426,7 @@ class DDIMSampler(object): unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, ) + if img_callback: img_callback(x_dec, i) diff --git a/ldm/modules/attention.py b/ldm/modules/attention.py index 24aef29279..1321e9db1e 100644 --- a/ldm/modules/attention.py +++ b/ldm/modules/attention.py @@ -7,13 +7,14 @@ from einops import rearrange, repeat from ldm.modules.diffusionmodules.util import checkpoint +import psutil def exists(val): return val is not None def uniq(arr): - return {el: True for el in arr}.keys() + return{el: True for el in arr}.keys() def default(val, d): @@ -45,18 +46,19 @@ class GEGLU(nn.Module): class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0): + def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): super().__init__() inner_dim = int(dim * mult) dim_out = default(dim_out, dim) - project_in = ( - nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU()) - if not glu - else GEGLU(dim, inner_dim) - ) + project_in = nn.Sequential( + nn.Linear(dim, inner_dim), + nn.GELU() + ) if not glu else GEGLU(dim, inner_dim) self.net = nn.Sequential( - project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out) + project_in, + nn.Dropout(dropout), + nn.Linear(inner_dim, dim_out) ) def forward(self, x): @@ -73,9 +75,7 @@ def zero_module(module): def Normalize(in_channels): - return torch.nn.GroupNorm( - num_groups=32, num_channels=in_channels, eps=1e-6, affine=True - ) + return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) class LinearAttention(nn.Module): @@ -83,28 +83,17 @@ class LinearAttention(nn.Module): super().__init__() self.heads = heads hidden_dim = dim_head * heads - self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False) + self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False) self.to_out = nn.Conv2d(hidden_dim, dim, 1) def forward(self, x): b, c, h, w = x.shape qkv = self.to_qkv(x) - q, k, v = rearrange( - qkv, - 'b (qkv heads c) h w -> qkv b heads c (h w)', - heads=self.heads, - qkv=3, - ) - k = k.softmax(dim=-1) + q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3) + k = k.softmax(dim=-1) context = torch.einsum('bhdn,bhen->bhde', k, v) out = torch.einsum('bhde,bhdn->bhen', context, q) - out = rearrange( - out, - 'b heads c (h w) -> b (heads c) h w', - heads=self.heads, - h=h, - w=w, - ) + out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w) return self.to_out(out) @@ -114,18 +103,26 @@ class SpatialSelfAttention(nn.Module): self.in_channels = in_channels self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - self.k = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - self.v = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - self.proj_out = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) + self.q = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.k = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.v = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + 
padding=0) + self.proj_out = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) def forward(self, x): h_ = x @@ -135,12 +132,12 @@ class SpatialSelfAttention(nn.Module): v = self.v(h_) # compute attention - b, c, h, w = q.shape + b,c,h,w = q.shape q = rearrange(q, 'b c h w -> b (h w) c') k = rearrange(k, 'b c h w -> b c (h w)') w_ = torch.einsum('bij,bjk->bik', q, k) - w_ = w_ * (int(c) ** (-0.5)) + w_ = w_ * (int(c)**(-0.5)) w_ = torch.nn.functional.softmax(w_, dim=2) # attend to values @@ -150,18 +147,16 @@ class SpatialSelfAttention(nn.Module): h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h) h_ = self.proj_out(h_) - return x + h_ + return x+h_ class CrossAttention(nn.Module): - def __init__( - self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0 - ): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.): super().__init__() inner_dim = dim_head * heads context_dim = default(context_dim, query_dim) - self.scale = dim_head**-0.5 + self.scale = dim_head ** -0.5 self.heads = heads self.to_q = nn.Linear(query_dim, inner_dim, bias=False) @@ -169,69 +164,136 @@ class CrossAttention(nn.Module): self.to_v = nn.Linear(context_dim, inner_dim, bias=False) self.to_out = nn.Sequential( - nn.Linear(inner_dim, query_dim), nn.Dropout(dropout) + nn.Linear(inner_dim, query_dim), + nn.Dropout(dropout) ) + if not torch.cuda.is_available(): + mem_av = psutil.virtual_memory().available / (1024**3) + if mem_av > 32: + self.einsum_op = self.einsum_op_v1 + elif mem_av > 12: + self.einsum_op = self.einsum_op_v2 + else: + self.einsum_op = self.einsum_op_v3 + del mem_av + else: + self.einsum_op = self.einsum_op_v4 + + # mps 64-128 GB + def einsum_op_v1(self, q, k, v, r1): + if q.shape[1] <= 4096: # for 512x512: the max q.shape[1] is 4096 + s1 = einsum('b i d, b j d -> b i j', q, k) * self.scale # aggressive/faster: operation in one go + s2 = s1.softmax(dim=-1, dtype=q.dtype) + del s1 + r1 = einsum('b i j, b j d -> b i d', s2, v) + del s2 + else: + # q.shape[0] * q.shape[1] * slice_size >= 2**31 throws err + # needs around half of that slice_size to not generate noise + slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1])) + for i in range(0, q.shape[1], slice_size): + end = i + slice_size + s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * self.scale + s2 = s1.softmax(dim=-1, dtype=r1.dtype) + del s1 + r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v) + del s2 + return r1 + + # mps 16-32 GB (can be optimized) + def einsum_op_v2(self, q, k, v, r1): + slice_size = math.floor(2**30 / (q.shape[0] * q.shape[1])) + for i in range(0, q.shape[1], slice_size): # conservative/less mem: operation in steps + end = i + slice_size + s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * self.scale + s2 = s1.softmax(dim=-1, dtype=r1.dtype) + del s1 + r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v) + del s2 + return r1 + + # mps 8 GB + def einsum_op_v3(self, q, k, v, r1): + slice_size = 1 + for i in range(0, q.shape[0], slice_size): # iterate over q.shape[0] + end = min(q.shape[0], i + slice_size) + s1 = einsum('b i d, b j d -> b i j', q[i:end], k[i:end]) # adapted einsum for mem + s1 *= self.scale + s2 = s1.softmax(dim=-1, dtype=r1.dtype) + del s1 + r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end]) # adapted einsum for mem + del s2 + return r1 + + # cuda + def einsum_op_v4(self, q, k, v, r1): + stats = torch.cuda.memory_stats(q.device) + mem_active = stats['active_bytes.all.current'] + mem_reserved = 
stats['reserved_bytes.all.current'] + mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device()) + mem_free_torch = mem_reserved - mem_active + mem_free_total = mem_free_cuda + mem_free_torch + + gb = 1024 ** 3 + tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * 4 + mem_required = tensor_size * 2.5 + steps = 1 + + if mem_required > mem_free_total: + steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2))) + + if steps > 64: + max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64 + raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). ' + f'Need: {mem_required/64/gb:0.1f}GB free, Have:{mem_free_total/gb:0.1f}GB free') + + slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] + for i in range(0, q.shape[1], slice_size): + end = min(q.shape[1], i + slice_size) + s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * self.scale + s2 = s1.softmax(dim=-1, dtype=r1.dtype) + del s1 + r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v) + del s2 + return r1 + def forward(self, x, context=None, mask=None): h = self.heads - q = self.to_q(x) + q_in = self.to_q(x) context = default(context, x) - k = self.to_k(context) - v = self.to_v(context) + k_in = self.to_k(context) + v_in = self.to_v(context) + device_type = 'mps' if x.device.type == 'mps' else 'cuda' + del context, x - q, k, v = map( - lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v) - ) + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in)) + del q_in, k_in, v_in + r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) + r1 = self.einsum_op(q, k, v, r1) + del q, k, v - sim = einsum('b i d, b j d -> b i j', q, k) * self.scale + r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h) + del r1 - if exists(mask): - mask = rearrange(mask, 'b ... 
-> b (...)') - max_neg_value = -torch.finfo(sim.dtype).max - mask = repeat(mask, 'b j -> (b h) () j', h=h) - sim.masked_fill_(~mask, max_neg_value) - - # attention, what we cannot get enough of - attn = sim.softmax(dim=-1) - - out = einsum('b i j, b j d -> b i d', attn, v) - out = rearrange(out, '(b h) n d -> b n (h d)', h=h) - return self.to_out(out) + return self.to_out(r2) class BasicTransformerBlock(nn.Module): - def __init__( - self, - dim, - n_heads, - d_head, - dropout=0.0, - context_dim=None, - gated_ff=True, - checkpoint=True, - ): + def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True): super().__init__() - self.attn1 = CrossAttention( - query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout - ) # is a self-attention + self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout) # is a self-attention self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) - self.attn2 = CrossAttention( - query_dim=dim, - context_dim=context_dim, - heads=n_heads, - dim_head=d_head, - dropout=dropout, - ) # is self-attn if context is none + self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, + heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none self.norm1 = nn.LayerNorm(dim) self.norm2 = nn.LayerNorm(dim) self.norm3 = nn.LayerNorm(dim) self.checkpoint = checkpoint def forward(self, x, context=None): - return checkpoint( - self._forward, (x, context), self.parameters(), self.checkpoint - ) + return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) def _forward(self, x, context=None): x = x.contiguous() if x.device.type == 'mps' else x @@ -249,43 +311,29 @@ class SpatialTransformer(nn.Module): Then apply standard transformer action. 
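For readers skimming the `attention.py` hunks above: the CrossAttention rewrite replaces one large einsum with per-slice evaluation, so the full similarity matrix between queries and keys never has to be materialized at once. A simplified sketch of that pattern, assuming tensors already reshaped to `(batch*heads, tokens, dim)`; the real `einsum_op_*` methods pick the slice size from free CUDA or system memory, here it is an explicit argument:

```python
import torch
from torch import einsum

# Chunked attention: softmax(Q K^T * scale) V computed in slices along the query
# axis. slice_size is a plain argument here; the patch derives it from memory.
def sliced_attention(q, k, v, scale, slice_size):
    r = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
    for i in range(0, q.shape[1], slice_size):
        end = min(q.shape[1], i + slice_size)
        s = einsum('b i d, b j d -> b i j', q[:, i:end], k) * scale
        r[:, i:end] = einsum('b i j, b j d -> b i d', s.softmax(dim=-1), v)
        del s  # free the partial similarity matrix before the next slice
    return r
```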
Finally, reshape to image """ - - def __init__( - self, - in_channels, - n_heads, - d_head, - depth=1, - dropout=0.0, - context_dim=None, - ): + def __init__(self, in_channels, n_heads, d_head, + depth=1, dropout=0., context_dim=None): super().__init__() self.in_channels = in_channels inner_dim = n_heads * d_head self.norm = Normalize(in_channels) - self.proj_in = nn.Conv2d( - in_channels, inner_dim, kernel_size=1, stride=1, padding=0 - ) + self.proj_in = nn.Conv2d(in_channels, + inner_dim, + kernel_size=1, + stride=1, + padding=0) self.transformer_blocks = nn.ModuleList( - [ - BasicTransformerBlock( - inner_dim, - n_heads, - d_head, - dropout=dropout, - context_dim=context_dim, - ) - for d in range(depth) - ] + [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim) + for d in range(depth)] ) - self.proj_out = zero_module( - nn.Conv2d( - inner_dim, in_channels, kernel_size=1, stride=1, padding=0 - ) - ) + self.proj_out = zero_module(nn.Conv2d(inner_dim, + in_channels, + kernel_size=1, + stride=1, + padding=0)) def forward(self, x, context=None): # note: if no context is given, cross-attention defaults to self-attention diff --git a/ldm/modules/diffusionmodules/model.py b/ldm/modules/diffusionmodules/model.py index cd79e37565..970f6aad8f 100644 --- a/ldm/modules/diffusionmodules/model.py +++ b/ldm/modules/diffusionmodules/model.py @@ -1,4 +1,5 @@ # pytorch_diffusion + derived encoder decoder +import gc import math import torch import torch.nn as nn @@ -8,6 +9,7 @@ from einops import rearrange from ldm.util import instantiate_from_config from ldm.modules.attention import LinearAttention +import psutil def get_timestep_embedding(timesteps, embedding_dim): """ @@ -26,19 +28,17 @@ def get_timestep_embedding(timesteps, embedding_dim): emb = timesteps.float()[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad - emb = torch.nn.functional.pad(emb, (0, 1, 0, 0)) + emb = torch.nn.functional.pad(emb, (0,1,0,0)) return emb def nonlinearity(x): # swish - return x * torch.sigmoid(x) + return x*torch.sigmoid(x) def Normalize(in_channels, num_groups=32): - return torch.nn.GroupNorm( - num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True - ) + return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) class Upsample(nn.Module): @@ -46,14 +46,14 @@ class Upsample(nn.Module): super().__init__() self.with_conv = with_conv if self.with_conv: - self.conv = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=3, stride=1, padding=1 - ) + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=3, + stride=1, + padding=1) def forward(self, x): - x = torch.nn.functional.interpolate( - x, scale_factor=2.0, mode='nearest' - ) + x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") if self.with_conv: x = self.conv(x) return x @@ -65,14 +65,16 @@ class Downsample(nn.Module): self.with_conv = with_conv if self.with_conv: # no asymmetric padding in torch conv, must do it ourselves - self.conv = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=3, stride=2, padding=0 - ) + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=3, + stride=2, + padding=0) def forward(self, x): if self.with_conv: - pad = (0, 1, 0, 1) - x = torch.nn.functional.pad(x, pad, mode='constant', value=0) + pad = (0,1,0,1) + x = torch.nn.functional.pad(x, pad, mode="constant", value=0) x = self.conv(x) else: x = 
torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) @@ -80,15 +82,8 @@ class Downsample(nn.Module): class ResnetBlock(nn.Module): - def __init__( - self, - *, - in_channels, - out_channels=None, - conv_shortcut=False, - dropout, - temb_channels=512, - ): + def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, + dropout, temb_channels=512): super().__init__() self.in_channels = in_channels out_channels = in_channels if out_channels is None else out_channels @@ -96,47 +91,60 @@ class ResnetBlock(nn.Module): self.use_conv_shortcut = conv_shortcut self.norm1 = Normalize(in_channels) - self.conv1 = torch.nn.Conv2d( - in_channels, out_channels, kernel_size=3, stride=1, padding=1 - ) + self.conv1 = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) if temb_channels > 0: - self.temb_proj = torch.nn.Linear(temb_channels, out_channels) + self.temb_proj = torch.nn.Linear(temb_channels, + out_channels) self.norm2 = Normalize(out_channels) self.dropout = torch.nn.Dropout(dropout) - self.conv2 = torch.nn.Conv2d( - out_channels, out_channels, kernel_size=3, stride=1, padding=1 - ) + self.conv2 = torch.nn.Conv2d(out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) if self.in_channels != self.out_channels: if self.use_conv_shortcut: - self.conv_shortcut = torch.nn.Conv2d( - in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1, - ) + self.conv_shortcut = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) else: - self.nin_shortcut = torch.nn.Conv2d( - in_channels, - out_channels, - kernel_size=1, - stride=1, - padding=0, - ) + self.nin_shortcut = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0) def forward(self, x, temb): - h = x - h = self.norm1(h) - h = nonlinearity(h) - h = self.conv1(h) + h1 = x + h2 = self.norm1(h1) + del h1 + + h3 = nonlinearity(h2) + del h2 + + h4 = self.conv1(h3) + del h3 if temb is not None: - h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None] + h4 = h4 + self.temb_proj(nonlinearity(temb))[:,:,None,None] - h = self.norm2(h) - h = nonlinearity(h) - h = self.dropout(h) - h = self.conv2(h) + h5 = self.norm2(h4) + del h4 + + h6 = nonlinearity(h5) + del h5 + + h7 = self.dropout(h6) + del h6 + + h8 = self.conv2(h7) + del h7 if self.in_channels != self.out_channels: if self.use_conv_shortcut: @@ -144,12 +152,10 @@ class ResnetBlock(nn.Module): else: x = self.nin_shortcut(x) - return x + h - + return x + h8 class LinAttnBlock(LinearAttention): """to match AttnBlock usage""" - def __init__(self, in_channels): super().__init__(dim=in_channels, heads=1, dim_head=in_channels) @@ -160,87 +166,120 @@ class AttnBlock(nn.Module): self.in_channels = in_channels self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - self.k = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - self.v = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) - self.proj_out = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=1, stride=1, padding=0 - ) + self.q = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.k = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.v = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.proj_out = torch.nn.Conv2d(in_channels, + 
in_channels, + kernel_size=1, + stride=1, + padding=0) + def forward(self, x): h_ = x h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) + q1 = self.q(h_) + k1 = self.k(h_) v = self.v(h_) # compute attention - b, c, h, w = q.shape - q = q.reshape(b, c, h * w) - q = q.permute(0, 2, 1) # b,hw,c - k = k.reshape(b, c, h * w) # b,c,hw - w_ = torch.bmm(q, k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] - w_ = w_ * (int(c) ** (-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) + b, c, h, w = q1.shape - # attend to values - v = v.reshape(b, c, h * w) - w_ = w_.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q) - h_ = torch.bmm( - v, w_ - ) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] - h_ = h_.reshape(b, c, h, w) + q2 = q1.reshape(b, c, h*w) + del q1 - h_ = self.proj_out(h_) + q = q2.permute(0, 2, 1) # b,hw,c + del q2 - return x + h_ + k = k1.reshape(b, c, h*w) # b,c,hw + del k1 + + h_ = torch.zeros_like(k, device=q.device) + + device_type = 'mps' if q.device.type == 'mps' else 'cuda' + if device_type == 'cuda': + stats = torch.cuda.memory_stats(q.device) + mem_active = stats['active_bytes.all.current'] + mem_reserved = stats['reserved_bytes.all.current'] + mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device()) + mem_free_torch = mem_reserved - mem_active + mem_free_total = mem_free_cuda + mem_free_torch + + tensor_size = q.shape[0] * q.shape[1] * k.shape[2] * 4 + mem_required = tensor_size * 2.5 + steps = 1 + + if mem_required > mem_free_total: + steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2))) + + slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] + + else: + if psutil.virtual_memory().available / (1024**3) < 12: + slice_size = 1 + else: + slice_size = min(q.shape[1], math.floor(2**30 / (q.shape[0] * q.shape[1]))) + + for i in range(0, q.shape[1], slice_size): + end = i + slice_size + + w1 = torch.bmm(q[:, i:end], k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] + w2 = w1 * (int(c)**(-0.5)) + del w1 + w3 = torch.nn.functional.softmax(w2, dim=2) + del w2 + + # attend to values + v1 = v.reshape(b, c, h*w) + w4 = w3.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q) + del w3 + + h_[:, :, i:end] = torch.bmm(v1, w4) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] + del v1, w4 + + h2 = h_.reshape(b, c, h, w) + del h_ + + h3 = self.proj_out(h2) + del h2 + + h3 += x + + return h3 -def make_attn(in_channels, attn_type='vanilla'): - assert attn_type in [ - 'vanilla', - 'linear', - 'none', - ], f'attn_type {attn_type} unknown' - print( - f"making attention of type '{attn_type}' with {in_channels} in_channels" - ) - if attn_type == 'vanilla': +def make_attn(in_channels, attn_type="vanilla"): + assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown' + print(f"making attention of type '{attn_type}' with {in_channels} in_channels") + if attn_type == "vanilla": return AttnBlock(in_channels) - elif attn_type == 'none': + elif attn_type == "none": return nn.Identity(in_channels) else: return LinAttnBlock(in_channels) class Model(nn.Module): - def __init__( - self, - *, - ch, - out_ch, - ch_mult=(1, 2, 4, 8), - num_res_blocks, - attn_resolutions, - dropout=0.0, - resamp_with_conv=True, - in_channels, - resolution, - use_timestep=True, - use_linear_attn=False, - attn_type='vanilla', - ): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, use_timestep=True, use_linear_attn=False, 
attn_type="vanilla"): super().__init__() - if use_linear_attn: - attn_type = 'linear' + if use_linear_attn: attn_type = "linear" self.ch = ch - self.temb_ch = self.ch * 4 + self.temb_ch = self.ch*4 self.num_resolutions = len(ch_mult) self.num_res_blocks = num_res_blocks self.resolution = resolution @@ -250,80 +289,70 @@ class Model(nn.Module): if self.use_timestep: # timestep embedding self.temb = nn.Module() - self.temb.dense = nn.ModuleList( - [ - torch.nn.Linear(self.ch, self.temb_ch), - torch.nn.Linear(self.temb_ch, self.temb_ch), - ] - ) + self.temb.dense = nn.ModuleList([ + torch.nn.Linear(self.ch, + self.temb_ch), + torch.nn.Linear(self.temb_ch, + self.temb_ch), + ]) # downsampling - self.conv_in = torch.nn.Conv2d( - in_channels, self.ch, kernel_size=3, stride=1, padding=1 - ) + self.conv_in = torch.nn.Conv2d(in_channels, + self.ch, + kernel_size=3, + stride=1, + padding=1) curr_res = resolution - in_ch_mult = (1,) + tuple(ch_mult) + in_ch_mult = (1,)+tuple(ch_mult) self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() - block_in = ch * in_ch_mult[i_level] - block_out = ch * ch_mult[i_level] + block_in = ch*in_ch_mult[i_level] + block_out = ch*ch_mult[i_level] for i_block in range(self.num_res_blocks): - block.append( - ResnetBlock( - in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout, - ) - ) + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(make_attn(block_in, attn_type=attn_type)) down = nn.Module() down.block = block down.attn = attn - if i_level != self.num_resolutions - 1: + if i_level != self.num_resolutions-1: down.downsample = Downsample(block_in, resamp_with_conv) curr_res = curr_res // 2 self.down.append(down) # middle self.mid = nn.Module() - self.mid.block_1 = ResnetBlock( - in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout, - ) + self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock( - in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout, - ) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) # upsampling self.up = nn.ModuleList() for i_level in reversed(range(self.num_resolutions)): block = nn.ModuleList() attn = nn.ModuleList() - block_out = ch * ch_mult[i_level] - skip_in = ch * ch_mult[i_level] - for i_block in range(self.num_res_blocks + 1): + block_out = ch*ch_mult[i_level] + skip_in = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks+1): if i_block == self.num_res_blocks: - skip_in = ch * in_ch_mult[i_level] - block.append( - ResnetBlock( - in_channels=block_in + skip_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout, - ) - ) + skip_in = ch*in_ch_mult[i_level] + block.append(ResnetBlock(in_channels=block_in+skip_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(make_attn(block_in, attn_type=attn_type)) @@ -333,16 +362,18 @@ class Model(nn.Module): if i_level != 0: up.upsample = Upsample(block_in, resamp_with_conv) curr_res = curr_res * 2 - self.up.insert(0, 
up) # prepend to get consistent order + self.up.insert(0, up) # prepend to get consistent order # end self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d( - block_in, out_ch, kernel_size=3, stride=1, padding=1 - ) + self.conv_out = torch.nn.Conv2d(block_in, + out_ch, + kernel_size=3, + stride=1, + padding=1) def forward(self, x, t=None, context=None): - # assert x.shape[2] == x.shape[3] == self.resolution + #assert x.shape[2] == x.shape[3] == self.resolution if context is not None: # assume aligned context, cat along channel axis x = torch.cat((x, context), dim=1) @@ -364,7 +395,7 @@ class Model(nn.Module): if len(self.down[i_level].attn) > 0: h = self.down[i_level].attn[i_block](h) hs.append(h) - if i_level != self.num_resolutions - 1: + if i_level != self.num_resolutions-1: hs.append(self.down[i_level].downsample(hs[-1])) # middle @@ -375,10 +406,9 @@ class Model(nn.Module): # upsampling for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks + 1): + for i_block in range(self.num_res_blocks+1): h = self.up[i_level].block[i_block]( - torch.cat([h, hs.pop()], dim=1), temb - ) + torch.cat([h, hs.pop()], dim=1), temb) if len(self.up[i_level].attn) > 0: h = self.up[i_level].attn[i_block](h) if i_level != 0: @@ -395,27 +425,12 @@ class Model(nn.Module): class Encoder(nn.Module): - def __init__( - self, - *, - ch, - out_ch, - ch_mult=(1, 2, 4, 8), - num_res_blocks, - attn_resolutions, - dropout=0.0, - resamp_with_conv=True, - in_channels, - resolution, - z_channels, - double_z=True, - use_linear_attn=False, - attn_type='vanilla', - **ignore_kwargs, - ): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla", + **ignore_kwargs): super().__init__() - if use_linear_attn: - attn_type = 'linear' + if use_linear_attn: attn_type = "linear" self.ch = ch self.temb_ch = 0 self.num_resolutions = len(ch_mult) @@ -424,64 +439,56 @@ class Encoder(nn.Module): self.in_channels = in_channels # downsampling - self.conv_in = torch.nn.Conv2d( - in_channels, self.ch, kernel_size=3, stride=1, padding=1 - ) + self.conv_in = torch.nn.Conv2d(in_channels, + self.ch, + kernel_size=3, + stride=1, + padding=1) curr_res = resolution - in_ch_mult = (1,) + tuple(ch_mult) + in_ch_mult = (1,)+tuple(ch_mult) self.in_ch_mult = in_ch_mult self.down = nn.ModuleList() for i_level in range(self.num_resolutions): block = nn.ModuleList() attn = nn.ModuleList() - block_in = ch * in_ch_mult[i_level] - block_out = ch * ch_mult[i_level] + block_in = ch*in_ch_mult[i_level] + block_out = ch*ch_mult[i_level] for i_block in range(self.num_res_blocks): - block.append( - ResnetBlock( - in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout, - ) - ) + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(make_attn(block_in, attn_type=attn_type)) down = nn.Module() down.block = block down.attn = attn - if i_level != self.num_resolutions - 1: + if i_level != self.num_resolutions-1: down.downsample = Downsample(block_in, resamp_with_conv) curr_res = curr_res // 2 self.down.append(down) # middle self.mid = nn.Module() - self.mid.block_1 = ResnetBlock( - in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout, - ) + 
self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock( - in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout, - ) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) # end self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d( - block_in, - 2 * z_channels if double_z else z_channels, - kernel_size=3, - stride=1, - padding=1, - ) + self.conv_out = torch.nn.Conv2d(block_in, + 2*z_channels if double_z else z_channels, + kernel_size=3, + stride=1, + padding=1) def forward(self, x): # timestep embedding @@ -495,7 +502,7 @@ class Encoder(nn.Module): if len(self.down[i_level].attn) > 0: h = self.down[i_level].attn[i_block](h) hs.append(h) - if i_level != self.num_resolutions - 1: + if i_level != self.num_resolutions-1: hs.append(self.down[i_level].downsample(hs[-1])) # middle @@ -512,28 +519,12 @@ class Encoder(nn.Module): class Decoder(nn.Module): - def __init__( - self, - *, - ch, - out_ch, - ch_mult=(1, 2, 4, 8), - num_res_blocks, - attn_resolutions, - dropout=0.0, - resamp_with_conv=True, - in_channels, - resolution, - z_channels, - give_pre_end=False, - tanh_out=False, - use_linear_attn=False, - attn_type='vanilla', - **ignorekwargs, - ): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, + attn_type="vanilla", **ignorekwargs): super().__init__() - if use_linear_attn: - attn_type = 'linear' + if use_linear_attn: attn_type = "linear" self.ch = ch self.temb_ch = 0 self.num_resolutions = len(ch_mult) @@ -544,52 +535,43 @@ class Decoder(nn.Module): self.tanh_out = tanh_out # compute in_ch_mult, block_in and curr_res at lowest res - in_ch_mult = (1,) + tuple(ch_mult) - block_in = ch * ch_mult[self.num_resolutions - 1] - curr_res = resolution // 2 ** (self.num_resolutions - 1) - self.z_shape = (1, z_channels, curr_res, curr_res) - print( - 'Working with z of shape {} = {} dimensions.'.format( - self.z_shape, np.prod(self.z_shape) - ) - ) + in_ch_mult = (1,)+tuple(ch_mult) + block_in = ch*ch_mult[self.num_resolutions-1] + curr_res = resolution // 2**(self.num_resolutions-1) + self.z_shape = (1,z_channels,curr_res,curr_res) + print("Working with z of shape {} = {} dimensions.".format( + self.z_shape, np.prod(self.z_shape))) # z to block_in - self.conv_in = torch.nn.Conv2d( - z_channels, block_in, kernel_size=3, stride=1, padding=1 - ) + self.conv_in = torch.nn.Conv2d(z_channels, + block_in, + kernel_size=3, + stride=1, + padding=1) # middle self.mid = nn.Module() - self.mid.block_1 = ResnetBlock( - in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout, - ) + self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock( - in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout, - ) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) # upsampling self.up = nn.ModuleList() for i_level in reversed(range(self.num_resolutions)): 
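The CUDA branches in `einsum_op_v4` and in the new `AttnBlock.forward` above do not use a fixed slice size; they measure free memory and double the number of slices until the attention product fits. A rough sketch of that sizing logic, assuming fp32 activations and keeping the patch's 2.5x headroom factor (the helper name is illustrative):

```python
import math
import torch

# Estimate how many query-axis slices are needed so a (B, N, M) fp32 similarity
# matrix, padded by a 2.5x safety factor as in the patch, fits in free CUDA memory.
def attention_slice_steps(q, k, safety_factor=2.5):
    stats = torch.cuda.memory_stats(q.device)
    mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device())
    mem_free_torch = stats['reserved_bytes.all.current'] - stats['active_bytes.all.current']
    mem_free_total = mem_free_cuda + mem_free_torch
    mem_required = q.shape[0] * q.shape[1] * k.shape[1] * 4 * safety_factor  # bytes
    if mem_required <= mem_free_total:
        return 1
    return 2 ** math.ceil(math.log2(mem_required / mem_free_total))
```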
block = nn.ModuleList() attn = nn.ModuleList() - block_out = ch * ch_mult[i_level] - for i_block in range(self.num_res_blocks + 1): - block.append( - ResnetBlock( - in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout, - ) - ) + block_out = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks+1): + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: attn.append(make_attn(block_in, attn_type=attn_type)) @@ -599,87 +581,103 @@ class Decoder(nn.Module): if i_level != 0: up.upsample = Upsample(block_in, resamp_with_conv) curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order + self.up.insert(0, up) # prepend to get consistent order # end self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d( - block_in, out_ch, kernel_size=3, stride=1, padding=1 - ) + self.conv_out = torch.nn.Conv2d(block_in, + out_ch, + kernel_size=3, + stride=1, + padding=1) def forward(self, z): - # assert z.shape[1:] == self.z_shape[1:] + #assert z.shape[1:] == self.z_shape[1:] self.last_z_shape = z.shape # timestep embedding temb = None # z to block_in - h = self.conv_in(z) + h1 = self.conv_in(z) # middle - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) + h2 = self.mid.block_1(h1, temb) + del h1 + + h3 = self.mid.attn_1(h2) + del h2 + + h = self.mid.block_2(h3, temb) + del h3 + + # prepare for up sampling + device_type = 'mps' if h.device.type == 'mps' else 'cuda' + gc.collect() + if device_type == 'cuda': + torch.cuda.empty_cache() # upsampling for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks + 1): + for i_block in range(self.num_res_blocks+1): h = self.up[i_level].block[i_block](h, temb) if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) + t = h + h = self.up[i_level].attn[i_block](t) + del t + if i_level != 0: - h = self.up[i_level].upsample(h) + t = h + h = self.up[i_level].upsample(t) + del t # end if self.give_pre_end: return h - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) + h1 = self.norm_out(h) + del h + + h2 = nonlinearity(h1) + del h1 + + h = self.conv_out(h2) + del h2 + if self.tanh_out: - h = torch.tanh(h) + t = h + h = torch.tanh(t) + del t + return h class SimpleDecoder(nn.Module): def __init__(self, in_channels, out_channels, *args, **kwargs): super().__init__() - self.model = nn.ModuleList( - [ - nn.Conv2d(in_channels, in_channels, 1), - ResnetBlock( - in_channels=in_channels, - out_channels=2 * in_channels, - temb_channels=0, - dropout=0.0, - ), - ResnetBlock( - in_channels=2 * in_channels, - out_channels=4 * in_channels, - temb_channels=0, - dropout=0.0, - ), - ResnetBlock( - in_channels=4 * in_channels, - out_channels=2 * in_channels, - temb_channels=0, - dropout=0.0, - ), - nn.Conv2d(2 * in_channels, in_channels, 1), - Upsample(in_channels, with_conv=True), - ] - ) + self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1), + ResnetBlock(in_channels=in_channels, + out_channels=2 * in_channels, + temb_channels=0, dropout=0.0), + ResnetBlock(in_channels=2 * in_channels, + out_channels=4 * in_channels, + temb_channels=0, dropout=0.0), + ResnetBlock(in_channels=4 * in_channels, + out_channels=2 * in_channels, + temb_channels=0, dropout=0.0), + nn.Conv2d(2*in_channels, in_channels, 1), + Upsample(in_channels, with_conv=True)]) # end self.norm_out = 
Normalize(in_channels) - self.conv_out = torch.nn.Conv2d( - in_channels, out_channels, kernel_size=3, stride=1, padding=1 - ) + self.conv_out = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) def forward(self, x): for i, layer in enumerate(self.model): - if i in [1, 2, 3]: + if i in [1,2,3]: x = layer(x, None) else: x = layer(x) @@ -691,16 +689,8 @@ class SimpleDecoder(nn.Module): class UpsampleDecoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - ch, - num_res_blocks, - resolution, - ch_mult=(2, 2), - dropout=0.0, - ): + def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution, + ch_mult=(2,2), dropout=0.0): super().__init__() # upsampling self.temb_ch = 0 @@ -714,14 +704,10 @@ class UpsampleDecoder(nn.Module): res_block = [] block_out = ch * ch_mult[i_level] for i_block in range(self.num_res_blocks + 1): - res_block.append( - ResnetBlock( - in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout, - ) - ) + res_block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) block_in = block_out self.res_blocks.append(nn.ModuleList(res_block)) if i_level != self.num_resolutions - 1: @@ -730,9 +716,11 @@ class UpsampleDecoder(nn.Module): # end self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d( - block_in, out_channels, kernel_size=3, stride=1, padding=1 - ) + self.conv_out = torch.nn.Conv2d(block_in, + out_channels, + kernel_size=3, + stride=1, + padding=1) def forward(self, x): # upsampling @@ -749,56 +737,35 @@ class UpsampleDecoder(nn.Module): class LatentRescaler(nn.Module): - def __init__( - self, factor, in_channels, mid_channels, out_channels, depth=2 - ): + def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2): super().__init__() # residual block, interpolate, residual block self.factor = factor - self.conv_in = nn.Conv2d( - in_channels, mid_channels, kernel_size=3, stride=1, padding=1 - ) - self.res_block1 = nn.ModuleList( - [ - ResnetBlock( - in_channels=mid_channels, - out_channels=mid_channels, - temb_channels=0, - dropout=0.0, - ) - for _ in range(depth) - ] - ) + self.conv_in = nn.Conv2d(in_channels, + mid_channels, + kernel_size=3, + stride=1, + padding=1) + self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, + out_channels=mid_channels, + temb_channels=0, + dropout=0.0) for _ in range(depth)]) self.attn = AttnBlock(mid_channels) - self.res_block2 = nn.ModuleList( - [ - ResnetBlock( - in_channels=mid_channels, - out_channels=mid_channels, - temb_channels=0, - dropout=0.0, - ) - for _ in range(depth) - ] - ) + self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, + out_channels=mid_channels, + temb_channels=0, + dropout=0.0) for _ in range(depth)]) - self.conv_out = nn.Conv2d( - mid_channels, - out_channels, - kernel_size=1, - ) + self.conv_out = nn.Conv2d(mid_channels, + out_channels, + kernel_size=1, + ) def forward(self, x): x = self.conv_in(x) for block in self.res_block1: x = block(x, None) - x = torch.nn.functional.interpolate( - x, - size=( - int(round(x.shape[2] * self.factor)), - int(round(x.shape[3] * self.factor)), - ), - ) + x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor)))) x = self.attn(x) for block in self.res_block2: x = block(x, None) @@ -807,42 +774,17 @@ class LatentRescaler(nn.Module): class MergedRescaleEncoder(nn.Module): - def 
__init__( - self, - in_channels, - ch, - resolution, - out_ch, - num_res_blocks, - attn_resolutions, - dropout=0.0, - resamp_with_conv=True, - ch_mult=(1, 2, 4, 8), - rescale_factor=1.0, - rescale_module_depth=1, - ): + def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, + ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1): super().__init__() intermediate_chn = ch * ch_mult[-1] - self.encoder = Encoder( - in_channels=in_channels, - num_res_blocks=num_res_blocks, - ch=ch, - ch_mult=ch_mult, - z_channels=intermediate_chn, - double_z=False, - resolution=resolution, - attn_resolutions=attn_resolutions, - dropout=dropout, - resamp_with_conv=resamp_with_conv, - out_ch=None, - ) - self.rescaler = LatentRescaler( - factor=rescale_factor, - in_channels=intermediate_chn, - mid_channels=intermediate_chn, - out_channels=out_ch, - depth=rescale_module_depth, - ) + self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult, + z_channels=intermediate_chn, double_z=False, resolution=resolution, + attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv, + out_ch=None) + self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn, + mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth) def forward(self, x): x = self.encoder(x) @@ -851,41 +793,15 @@ class MergedRescaleEncoder(nn.Module): class MergedRescaleDecoder(nn.Module): - def __init__( - self, - z_channels, - out_ch, - resolution, - num_res_blocks, - attn_resolutions, - ch, - ch_mult=(1, 2, 4, 8), - dropout=0.0, - resamp_with_conv=True, - rescale_factor=1.0, - rescale_module_depth=1, - ): + def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8), + dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1): super().__init__() - tmp_chn = z_channels * ch_mult[-1] - self.decoder = Decoder( - out_ch=out_ch, - z_channels=tmp_chn, - attn_resolutions=attn_resolutions, - dropout=dropout, - resamp_with_conv=resamp_with_conv, - in_channels=None, - num_res_blocks=num_res_blocks, - ch_mult=ch_mult, - resolution=resolution, - ch=ch, - ) - self.rescaler = LatentRescaler( - factor=rescale_factor, - in_channels=z_channels, - mid_channels=tmp_chn, - out_channels=tmp_chn, - depth=rescale_module_depth, - ) + tmp_chn = z_channels*ch_mult[-1] + self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout, + resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks, + ch_mult=ch_mult, resolution=resolution, ch=ch) + self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn, + out_channels=tmp_chn, depth=rescale_module_depth) def forward(self, x): x = self.rescaler(x) @@ -894,32 +810,17 @@ class MergedRescaleDecoder(nn.Module): class Upsampler(nn.Module): - def __init__( - self, in_size, out_size, in_channels, out_channels, ch_mult=2 - ): + def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2): super().__init__() assert out_size >= in_size - num_blocks = int(np.log2(out_size // in_size)) + 1 - factor_up = 1.0 + (out_size % in_size) - print( - f'Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}' - ) - self.rescaler = LatentRescaler( - factor=factor_up, - in_channels=in_channels, - mid_channels=2 * in_channels, 
- out_channels=in_channels, - ) - self.decoder = Decoder( - out_ch=out_channels, - resolution=out_size, - z_channels=in_channels, - num_res_blocks=2, - attn_resolutions=[], - in_channels=None, - ch=in_channels, - ch_mult=[ch_mult for _ in range(num_blocks)], - ) + num_blocks = int(np.log2(out_size//in_size))+1 + factor_up = 1.+ (out_size % in_size) + print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}") + self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels, + out_channels=in_channels) + self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2, + attn_resolutions=[], in_channels=None, ch=in_channels, + ch_mult=[ch_mult for _ in range(num_blocks)]) def forward(self, x): x = self.rescaler(x) @@ -928,55 +829,42 @@ class Upsampler(nn.Module): class Resize(nn.Module): - def __init__(self, in_channels=None, learned=False, mode='bilinear'): + def __init__(self, in_channels=None, learned=False, mode="bilinear"): super().__init__() self.with_conv = learned self.mode = mode if self.with_conv: - print( - f'Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode' - ) + print(f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode") raise NotImplementedError() assert in_channels is not None # no asymmetric padding in torch conv, must do it ourselves - self.conv = torch.nn.Conv2d( - in_channels, in_channels, kernel_size=4, stride=2, padding=1 - ) + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=4, + stride=2, + padding=1) def forward(self, x, scale_factor=1.0): - if scale_factor == 1.0: + if scale_factor==1.0: return x else: - x = torch.nn.functional.interpolate( - x, - mode=self.mode, - align_corners=False, - scale_factor=scale_factor, - ) + x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor) return x - class FirstStagePostProcessor(nn.Module): - def __init__( - self, - ch_mult: list, - in_channels, - pretrained_model: nn.Module = None, - reshape=False, - n_channels=None, - dropout=0.0, - pretrained_config=None, - ): + + def __init__(self, ch_mult:list, in_channels, + pretrained_model:nn.Module=None, + reshape=False, + n_channels=None, + dropout=0., + pretrained_config=None): super().__init__() if pretrained_config is None: - assert ( - pretrained_model is not None - ), 'Either "pretrained_model" or "pretrained_config" must not be None' + assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' self.pretrained_model = pretrained_model else: - assert ( - pretrained_config is not None - ), 'Either "pretrained_model" or "pretrained_config" must not be None' + assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' self.instantiate_pretrained(pretrained_config) self.do_reshape = reshape @@ -984,28 +872,22 @@ class FirstStagePostProcessor(nn.Module): if n_channels is None: n_channels = self.pretrained_model.encoder.ch - self.proj_norm = Normalize(in_channels, num_groups=in_channels // 2) - self.proj = nn.Conv2d( - in_channels, n_channels, kernel_size=3, stride=1, padding=1 - ) + self.proj_norm = Normalize(in_channels,num_groups=in_channels//2) + self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3, + stride=1,padding=1) blocks = [] downs = [] ch_in = n_channels for m in ch_mult: - blocks.append( - 
ResnetBlock( - in_channels=ch_in, - out_channels=m * n_channels, - dropout=dropout, - ) - ) + blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout)) ch_in = m * n_channels downs.append(Downsample(ch_in, with_conv=False)) self.model = nn.ModuleList(blocks) self.downsampler = nn.ModuleList(downs) + def instantiate_pretrained(self, config): model = instantiate_from_config(config) self.pretrained_model = model.eval() @@ -1013,23 +895,24 @@ class FirstStagePostProcessor(nn.Module): for param in self.pretrained_model.parameters(): param.requires_grad = False + @torch.no_grad() - def encode_with_pretrained(self, x): + def encode_with_pretrained(self,x): c = self.pretrained_model.encode(x) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() - return c + return c - def forward(self, x): + def forward(self,x): z_fs = self.encode_with_pretrained(x) z = self.proj_norm(z_fs) z = self.proj(z) z = nonlinearity(z) - for submodel, downmodel in zip(self.model, self.downsampler): - z = submodel(z, temb=None) + for submodel, downmodel in zip(self.model,self.downsampler): + z = submodel(z,temb=None) z = downmodel(z) if self.do_reshape: - z = rearrange(z, 'b c h w -> b (h w) c') + z = rearrange(z,'b c h w -> b (h w) c') return z diff --git a/ldm/modules/diffusionmodules/util.py b/ldm/modules/diffusionmodules/util.py index 197b42b2bc..2cb56a14a0 100644 --- a/ldm/modules/diffusionmodules/util.py +++ b/ldm/modules/diffusionmodules/util.py @@ -81,7 +81,9 @@ def make_ddim_timesteps( # assert ddim_timesteps.shape[0] == num_ddim_timesteps # add one to get the final alpha values right (the ones from first scale to data during sampling) - steps_out = ddim_timesteps + 1 +# steps_out = ddim_timesteps + 1 + steps_out = ddim_timesteps + if verbose: print(f'Selected timesteps for ddim sampler: {steps_out}') return steps_out diff --git a/ldm/modules/embedding_manager.py b/ldm/modules/embedding_manager.py index 128b575e9b..b579bcd885 100644 --- a/ldm/modules/embedding_manager.py +++ b/ldm/modules/embedding_manager.py @@ -24,9 +24,9 @@ def get_clip_token_for_string(tokenizer, string): return_tensors='pt', ) tokens = batch_encoding['input_ids'] - assert ( + """ assert ( torch.count_nonzero(tokens - 49407) == 2 - ), f"String '{string}' maps to more than a single token. Please use another string" + ), f"String '{string}' maps to more than a single token. 
Please use another string" """ return tokens[0, 1] @@ -57,8 +57,9 @@ class EmbeddingManager(nn.Module): ): super().__init__() - self.string_to_token_dict = {} + self.embedder = embedder + self.string_to_token_dict = {} self.string_to_param_dict = nn.ParameterDict() self.initial_embeddings = ( @@ -217,12 +218,28 @@ class EmbeddingManager(nn.Module): def load(self, ckpt_path, full=True): ckpt = torch.load(ckpt_path, map_location='cpu') - self.string_to_token_dict = ckpt["string_to_token"] - self.string_to_param_dict = ckpt["string_to_param"] + + # Handle .pt textual inversion files + if 'string_to_token' in ckpt and 'string_to_param' in ckpt: + self.string_to_token_dict = ckpt["string_to_token"] + self.string_to_param_dict = ckpt["string_to_param"] + + # Handle .bin textual inversion files from Huggingface Concepts + # https://huggingface.co/sd-concepts-library + else: + for token_str in list(ckpt.keys()): + token = get_clip_token_for_string(self.embedder.tokenizer, token_str) + self.string_to_token_dict[token_str] = token + ckpt[token_str] = torch.nn.Parameter(ckpt[token_str]) + + self.string_to_param_dict.update(ckpt) + if not full: for key, value in self.string_to_param_dict.items(): self.string_to_param_dict[key] = torch.nn.Parameter(value.half()) + print(f'Added terms: {", ".join(self.string_to_param_dict.keys())}') + def get_embedding_norms_squared(self): all_params = torch.cat( list(self.string_to_param_dict.values()), axis=0 diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py index c67cf0052c..548c44fa49 100644 --- a/ldm/simplet2i.py +++ b/ldm/simplet2i.py @@ -1,844 +1,13 @@ -# Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein) +''' +This module is provided for backward compatibility with the +original (hasty) API. -# Derived from source code carrying the following copyrights -# Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich -# Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors +Please use ldm.generate instead. 
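On the `EmbeddingManager.load()` change above: it now accepts both the original `.pt` textual-inversion checkpoints and the flat `.bin` files published in the Huggingface sd-concepts-library. A sketch of the two layouts the new branch distinguishes (the file path is hypothetical):

```python
import torch

# Illustrative only: the keys load() checks to tell the two formats apart.
ckpt = torch.load('learned_embeds.bin', map_location='cpu')  # hypothetical path

if 'string_to_token' in ckpt and 'string_to_param' in ckpt:
    # original .pt layout: explicit token and parameter dictionaries
    tokens, params = ckpt['string_to_token'], ckpt['string_to_param']
else:
    # Huggingface concepts .bin layout: a flat {token_string: embedding_tensor} dict
    for token_str, embedding in ckpt.items():
        print(token_str, tuple(embedding.shape))
```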
+''' -import torch -import numpy as np -import random -import os -import traceback -from omegaconf import OmegaConf -from PIL import Image -from tqdm import tqdm, trange -from itertools import islice -from einops import rearrange, repeat -from torchvision.utils import make_grid -from pytorch_lightning import seed_everything -from torch import autocast -from contextlib import contextmanager, nullcontext -import transformers -import time -import re -import sys +from ldm.generate import Generate -from ldm.util import instantiate_from_config -from ldm.models.diffusion.ddim import DDIMSampler -from ldm.models.diffusion.plms import PLMSSampler -from ldm.models.diffusion.ksampler import KSampler -from ldm.dream.pngwriter import PngWriter -from ldm.dream.image_util import InitImageResizer -from ldm.dream.devices import choose_autocast_device, choose_torch_device - -"""Simplified text to image API for stable diffusion/latent diffusion - -Example Usage: - -from ldm.simplet2i import T2I - -# Create an object with default values -t2i = T2I(model = // models/ldm/stable-diffusion-v1/model.ckpt - config = // configs/stable-diffusion/v1-inference.yaml - iterations = // how many times to run the sampling (1) - steps = // 50 - seed = // current system time - sampler_name= ['ddim', 'k_dpm_2_a', 'k_dpm_2', 'k_euler_a', 'k_euler', 'k_heun', 'k_lms', 'plms'] // k_lms - grid = // false - width = // image width, multiple of 64 (512) - height = // image height, multiple of 64 (512) - cfg_scale = // unconditional guidance scale (7.5) - ) - -# do the slow model initialization -t2i.load_model() - -# Do the fast inference & image generation. Any options passed here -# override the default values assigned during class initialization -# Will call load_model() if the model was not previously loaded and so -# may be slow at first. -# The method returns a list of images. Each row of the list is a sub-list of [filename,seed] -results = t2i.prompt2png(prompt = "an astronaut riding a horse", - outdir = "./outputs/samples", - iterations = 3) - -for row in results: - print(f'filename={row[0]}') - print(f'seed ={row[1]}') - -# Same thing, but using an initial image. -results = t2i.prompt2png(prompt = "an astronaut riding a horse", - outdir = "./outputs/, - iterations = 3, - init_img = "./sketches/horse+rider.png") - -for row in results: - print(f'filename={row[0]}') - print(f'seed ={row[1]}') - -# Same thing, but we return a series of Image objects, which lets you manipulate them, -# combine them, and save them under arbitrary names - -results = t2i.prompt2image(prompt = "an astronaut riding a horse" - outdir = "./outputs/") -for row in results: - im = row[0] - seed = row[1] - im.save(f'./outputs/samples/an_astronaut_riding_a_horse-{seed}.png') - im.thumbnail(100,100).save('./outputs/samples/astronaut_thumb.jpg') - -Note that the old txt2img() and img2img() calls are deprecated but will -still work. -""" - - -class T2I: - """T2I class - Attributes - ---------- - model - config - iterations - steps - seed - sampler_name - width - height - cfg_scale - latent_channels - downsampling_factor - precision - strength - embedding_path - - The vast majority of these arguments default to reasonable values. 
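The replacement `ldm/simplet2i.py` above keeps the old import path alive while pointing users at `ldm.generate`. Only the `from ldm.generate import Generate` line is visible in this hunk; a hypothetical version of the rest of the shim, to illustrate the pattern (the `T2I` alias is an assumption, not confirmed by the diff):

```python
# Hypothetical backward-compatibility shim: re-export the new implementation
# under the old name so `from ldm.simplet2i import T2I` keeps working.
from ldm.generate import Generate


class T2I(Generate):
    """Deprecated alias; use ldm.generate.Generate directly."""
    pass
```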
-""" - - def __init__( - self, - iterations=1, - steps=50, - seed=None, - cfg_scale=7.5, - weights='models/ldm/stable-diffusion-v1/model.ckpt', - config='configs/stable-diffusion/v1-inference.yaml', - grid=False, - width=512, - height=512, - sampler_name='k_lms', - latent_channels=4, - downsampling_factor=8, - ddim_eta=0.0, # deterministic - precision='autocast', - full_precision=False, - strength=0.75, # default in scripts/img2img.py - embedding_path=None, - device_type = 'cuda', - # just to keep track of this parameter when regenerating prompt - # needs to be replaced when new configuration system implemented. - latent_diffusion_weights=False, - ): - self.iterations = iterations - self.width = width - self.height = height - self.steps = steps - self.cfg_scale = cfg_scale - self.weights = weights - self.config = config - self.sampler_name = sampler_name - self.latent_channels = latent_channels - self.downsampling_factor = downsampling_factor - self.grid = grid - self.ddim_eta = ddim_eta - self.precision = precision - self.full_precision = True if choose_torch_device() == 'mps' else full_precision - self.strength = strength - self.embedding_path = embedding_path - self.device_type = device_type - self.model = None # empty for now - self.sampler = None - self.device = None - self.latent_diffusion_weights = latent_diffusion_weights - - if device_type == 'cuda' and not torch.cuda.is_available(): - device_type = choose_torch_device() - print(">> cuda not available, using device", device_type) - self.device = torch.device(device_type) - - # for VRAM usage statistics - device_type = choose_torch_device() - self.session_peakmem = torch.cuda.max_memory_allocated() if device_type == 'cuda' else None - - if seed is None: - self.seed = self._new_seed() - else: - self.seed = seed - transformers.logging.set_verbosity_error() - - def prompt2png(self, prompt, outdir, **kwargs): - """ - Takes a prompt and an output directory, writes out the requested number - of PNG files, and returns an array of [[filename,seed],[filename,seed]...] 
- Optional named arguments are the same as those passed to T2I and prompt2image() - """ - results = self.prompt2image(prompt, **kwargs) - pngwriter = PngWriter(outdir) - prefix = pngwriter.unique_prefix() - outputs = [] - for image, seed in results: - name = f'{prefix}.{seed}.png' - path = pngwriter.save_image_and_prompt_to_png( - image, f'{prompt} -S{seed}', name) - outputs.append([path, seed]) - return outputs - - def txt2img(self, prompt, **kwargs): - outdir = kwargs.pop('outdir', 'outputs/img-samples') - return self.prompt2png(prompt, outdir, **kwargs) - - def img2img(self, prompt, **kwargs): - outdir = kwargs.pop('outdir', 'outputs/img-samples') - assert ( - 'init_img' in kwargs - ), 'call to img2img() must include the init_img argument' - return self.prompt2png(prompt, outdir, **kwargs) - - def prompt2image( - self, - # these are common - prompt, - iterations = None, - steps = None, - seed = None, - cfg_scale = None, - ddim_eta = None, - skip_normalize = False, - image_callback = None, - step_callback = None, - width = None, - height = None, - # these are specific to img2img - init_img = None, - fit = False, - strength = None, - gfpgan_strength= 0, - save_original = False, - upscale = None, - sampler_name = None, - log_tokenization= False, - with_variations = None, - variation_amount = 0.0, - **args, - ): # eat up additional cruft - """ - ldm.prompt2image() is the common entry point for txt2img() and img2img() - It takes the following arguments: - prompt // prompt string (no default) - iterations // iterations (1); image count=iterations - steps // refinement steps per iteration - seed // seed for random number generator - width // width of image, in multiples of 64 (512) - height // height of image, in multiples of 64 (512) - cfg_scale // how strongly the prompt influences the image (7.5) (must be >1) - init_img // path to an initial image - its dimensions override width and height - strength // strength for noising/unnoising init_img. 0.0 preserves image exactly, 1.0 replaces it completely - gfpgan_strength // strength for GFPGAN. 0.0 preserves image exactly, 1.0 replaces it completely - ddim_eta // image randomness (eta=0.0 means the same seed always produces the same image) - step_callback // a function or method that will be called each step - image_callback // a function or method that will be called each time an image is generated - with_variations // a weighted list [(seed_1, weight_1), (seed_2, weight_2), ...] of variations which should be applied before doing any generation - variation_amount // optional 0-1 value to slerp from -S noise to random noise (allows variations on an image) - - To use the step callback, define a function that receives two arguments: - - Image GPU data - - The step number - - To use the image callback, define a function of method that receives two arguments, an Image object - and the seed. You can then do whatever you like with the image, including converting it to - different formats and manipulating it. For example: - - def process_image(image,seed): - image.save(f{'images/seed.png'}) - - The callback used by the prompt2png() can be found in ldm/dream_util.py. It contains code - to create the requested output directory, select a unique informative name for each image, and - write the prompt into the PNG metadata. 
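The (now removed) `prompt2image` docstring above describes two hooks, `step_callback` and `image_callback`, but its inline example (`image.save(f{'images/seed.png'})`) is not valid Python. A corrected sketch of the intended usage; the invocation at the bottom assumes a configured `T2I` instance named `t2i`:

```python
# Corrected callback sketch for the hooks described in the docstring above.
def process_image(image, seed):
    # called once per finished image (a PIL.Image) together with its seed
    image.save(f'outputs/samples/{seed}.png')

def process_step(latents, step):
    # called once per sampling step with the intermediate latent tensor
    print(f'step {step}: latents of shape {tuple(latents.shape)}')

# hypothetical invocation:
# results = t2i.prompt2image('an astronaut riding a horse',
#                            image_callback=process_image,
#                            step_callback=process_step)
```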
- """ - # TODO: convert this into a getattr() loop - steps = steps or self.steps - width = width or self.width - height = height or self.height - cfg_scale = cfg_scale or self.cfg_scale - ddim_eta = ddim_eta or self.ddim_eta - iterations = iterations or self.iterations - strength = strength or self.strength - self.log_tokenization = log_tokenization - with_variations = [] if with_variations is None else with_variations - - model = ( - self.load_model() - ) # will instantiate the model or return it from cache - assert cfg_scale > 1.0, 'CFG_Scale (-C) must be >1.0' - assert ( - 0.0 <= strength <= 1.0 - ), 'can only work with strength in [0.0, 1.0]' - assert ( - 0.0 <= variation_amount <= 1.0 - ), '-v --variation_amount must be in [0.0, 1.0]' - - if len(with_variations) > 0 or variation_amount > 0.0: - assert seed is not None,\ - 'seed must be specified when using with_variations' - if variation_amount == 0.0: - assert iterations == 1,\ - 'when using --with_variations, multiple iterations are only possible when using --variation_amount' - assert all(0 <= weight <= 1 for _, weight in with_variations),\ - f'variation weights must be in [0.0, 1.0]: got {[weight for _, weight in with_variations]}' - - seed = seed or self.seed - width, height, _ = self._resolution_check(width, height, log=True) - - # TODO: - Check if this is still necessary to run on M1 devices. - # - Move code into ldm.dream.devices to live alongside other - # special-hardware casing code. - if self.precision == 'autocast' and torch.cuda.is_available(): - scope = autocast - else: - scope = nullcontext - - if sampler_name and (sampler_name != self.sampler_name): - self.sampler_name = sampler_name - self._set_sampler() - - tic = time.time() - if torch.cuda.is_available(): - torch.cuda.reset_peak_memory_stats() - results = list() - - try: - if init_img: - assert os.path.exists(init_img), f'{init_img}: File not found' - init_image = self._load_img(init_img, width, height, fit).to(self.device) - with scope(self.device.type): - init_latent = self.model.get_first_stage_encoding( - self.model.encode_first_stage(init_image) - ) # move to latent space - - #print(f' DEBUG: seed at make_image time ={seed}') - make_image = self._img2img( - prompt, - steps=steps, - cfg_scale=cfg_scale, - ddim_eta=ddim_eta, - skip_normalize=skip_normalize, - init_latent=init_latent, - strength=strength, - callback=step_callback, - ) - else: - init_latent = None - make_image = self._txt2img( - prompt, - steps=steps, - cfg_scale=cfg_scale, - ddim_eta=ddim_eta, - skip_normalize=skip_normalize, - width=width, - height=height, - callback=step_callback, - ) - - initial_noise = None - if variation_amount > 0 or len(with_variations) > 0: - # use fixed initial noise plus random noise per iteration - seed_everything(seed) - initial_noise = self._get_noise(init_latent,width,height) - for v_seed, v_weight in with_variations: - seed = v_seed - seed_everything(seed) - next_noise = self._get_noise(init_latent,width,height) - initial_noise = self.slerp(v_weight, initial_noise, next_noise) - if variation_amount > 0: - random.seed() # reset RNG to an actually random state, so we can get a random seed for variations - seed = random.randrange(0,np.iinfo(np.uint32).max) - - device_type = choose_autocast_device(self.device) - with scope(device_type), self.model.ema_scope(): - for n in trange(iterations, desc='Generating'): - x_T = None - if variation_amount > 0: - seed_everything(seed) - target_noise = self._get_noise(init_latent,width,height) - x_T = self.slerp(variation_amount, 
initial_noise, target_noise) - elif initial_noise is not None: - # i.e. we specified particular variations - x_T = initial_noise - else: - seed_everything(seed) - if self.device.type == 'mps': - x_T = self._get_noise(init_latent,width,height) - # make_image will do the equivalent of get_noise itself - #print(f' DEBUG: seed at make_image() invocation time ={seed}') - image = make_image(x_T) - results.append([image, seed]) - if image_callback is not None: - image_callback(image, seed) - seed = self._new_seed() - - if upscale is not None or gfpgan_strength > 0: - for result in results: - image, seed = result - try: - if upscale is not None: - from ldm.gfpgan.gfpgan_tools import ( - real_esrgan_upscale, - ) - if len(upscale) < 2: - upscale.append(0.75) - image = real_esrgan_upscale( - image, - upscale[1], - int(upscale[0]), - prompt, - seed, - ) - if gfpgan_strength > 0: - from ldm.gfpgan.gfpgan_tools import _run_gfpgan - - image = _run_gfpgan( - image, gfpgan_strength, prompt, seed, 1 - ) - except Exception as e: - print( - f'>> Error running RealESRGAN - Your image was not upscaled.\n{e}' - ) - if image_callback is not None: - if save_original: - image_callback(image, seed) - else: - image_callback(image, seed, upscaled=True) - else: # no callback passed, so we simply replace old image with rescaled one - result[0] = image - - except KeyboardInterrupt: - print('*interrupted*') - print( - '>> Partial results will be returned; if --grid was requested, nothing will be returned.' - ) - except RuntimeError as e: - print(traceback.format_exc(), file=sys.stderr) - print('>> Are you sure your system has an adequate NVIDIA GPU?') - - toc = time.time() - print('>> Usage stats:') - print( - f'>> {len(results)} image(s) generated in', '%4.2fs' % (toc - tic) - ) - print( - f'>> Max VRAM used for this generation:', - '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9), - ) - - if self.session_peakmem: - self.session_peakmem = max( - self.session_peakmem, torch.cuda.max_memory_allocated() - ) - print( - f'>> Max VRAM used since script start: ', - '%4.2fG' % (self.session_peakmem / 1e9), - ) - return results - - @torch.no_grad() - def _txt2img( - self, - prompt, - steps, - cfg_scale, - ddim_eta, - skip_normalize, - width, - height, - callback, - ): - """ - Returns a function returning an image derived from the prompt and the initial image - Return value depends on the seed at the time you call it - """ - - sampler = self.sampler - - def make_image(x_T): - uc, c = self._get_uc_and_c(prompt, skip_normalize) - shape = [ - self.latent_channels, - height // self.downsampling_factor, - width // self.downsampling_factor, - ] - samples, _ = sampler.sample( - batch_size=1, - S=steps, - x_T=x_T, - conditioning=c, - shape=shape, - verbose=False, - unconditional_guidance_scale=cfg_scale, - unconditional_conditioning=uc, - eta=ddim_eta, - img_callback=callback - ) - return self._sample_to_image(samples) - return make_image - - @torch.no_grad() - def _img2img( - self, - prompt, - steps, - cfg_scale, - ddim_eta, - skip_normalize, - init_latent, - strength, - callback, # Currently not implemented for img2img - ): - """ - Returns a function returning an image derived from the prompt and the initial image - Return value depends on the seed at the time you call it - """ - - # PLMS sampler not supported yet, so ignore previous sampler - if self.sampler_name != 'ddim': - print( - f">> sampler '{self.sampler_name}' is not yet supported. 
Using DDIM sampler" - ) - sampler = DDIMSampler(self.model, device=self.device) - else: - sampler = self.sampler - - sampler.make_schedule( - ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False - ) - - t_enc = int(strength * steps) - - def make_image(x_T): - uc, c = self._get_uc_and_c(prompt, skip_normalize) - - # encode (scaled latent) - z_enc = sampler.stochastic_encode( - init_latent, - torch.tensor([t_enc]).to(self.device), - noise=x_T - ) - # decode it - samples = sampler.decode( - z_enc, - c, - t_enc, - img_callback=callback, - unconditional_guidance_scale=cfg_scale, - unconditional_conditioning=uc, - ) - return self._sample_to_image(samples) - return make_image - - # TODO: does this actually need to run every loop? does anything in it vary by random seed? - def _get_uc_and_c(self, prompt, skip_normalize): - - uc = self.model.get_learned_conditioning(['']) - - # get weighted sub-prompts - weighted_subprompts = T2I._split_weighted_subprompts( - prompt, skip_normalize) - - if len(weighted_subprompts) > 1: - # i dont know if this is correct.. but it works - c = torch.zeros_like(uc) - # normalize each "sub prompt" and add it - for subprompt, weight in weighted_subprompts: - self._log_tokenization(subprompt) - c = torch.add( - c, - self.model.get_learned_conditioning([subprompt]), - alpha=weight, - ) - else: # just standard 1 prompt - self._log_tokenization(prompt) - c = self.model.get_learned_conditioning([prompt]) - return (uc, c) - - def _sample_to_image(self, samples): - x_samples = self.model.decode_first_stage(samples) - x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0) - if len(x_samples) != 1: - raise Exception( - f'>> expected to get a single image, but got {len(x_samples)}') - x_sample = 255.0 * rearrange( - x_samples[0].cpu().numpy(), 'c h w -> h w c' - ) - return Image.fromarray(x_sample.astype(np.uint8)) - - def _new_seed(self): - self.seed = random.randrange(0, np.iinfo(np.uint32).max) - return self.seed - - def load_model(self): - """Load and initialize the model from configuration variables passed at object creation time""" - if self.model is None: - seed_everything(self.seed) - try: - config = OmegaConf.load(self.config) - model = self._load_model_from_config(config, self.weights) - if self.embedding_path is not None: - model.embedding_manager.load( - self.embedding_path, self.full_precision - ) - self.model = model.to(self.device) - # model.to doesn't change the cond_stage_model.device used to move the tokenizer output, so set it here - self.model.cond_stage_model.device = self.device - except AttributeError as e: - print(f'>> Error loading model. 
{str(e)}', file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - raise SystemExit from e - - self._set_sampler() - - return self.model - - # returns a tensor filled with random numbers from a normal distribution - def _get_noise(self,init_latent,width,height): - if init_latent is not None: - if self.device.type == 'mps': - return torch.randn_like(init_latent, device='cpu').to(self.device) - else: - return torch.randn_like(init_latent, device=self.device) - else: - if self.device.type == 'mps': - return torch.randn([1, - self.latent_channels, - height // self.downsampling_factor, - width // self.downsampling_factor], - device='cpu').to(self.device) - else: - return torch.randn([1, - self.latent_channels, - height // self.downsampling_factor, - width // self.downsampling_factor], - device=self.device) - - def _set_sampler(self): - msg = f'>> Setting Sampler to {self.sampler_name}' - if self.sampler_name == 'plms': - self.sampler = PLMSSampler(self.model, device=self.device) - elif self.sampler_name == 'ddim': - self.sampler = DDIMSampler(self.model, device=self.device) - elif self.sampler_name == 'k_dpm_2_a': - self.sampler = KSampler( - self.model, 'dpm_2_ancestral', device=self.device - ) - elif self.sampler_name == 'k_dpm_2': - self.sampler = KSampler(self.model, 'dpm_2', device=self.device) - elif self.sampler_name == 'k_euler_a': - self.sampler = KSampler( - self.model, 'euler_ancestral', device=self.device - ) - elif self.sampler_name == 'k_euler': - self.sampler = KSampler(self.model, 'euler', device=self.device) - elif self.sampler_name == 'k_heun': - self.sampler = KSampler(self.model, 'heun', device=self.device) - elif self.sampler_name == 'k_lms': - self.sampler = KSampler(self.model, 'lms', device=self.device) - else: - msg = f'>> Unsupported Sampler: {self.sampler_name}, Defaulting to plms' - self.sampler = PLMSSampler(self.model, device=self.device) - - print(msg) - - def _load_model_from_config(self, config, ckpt): - print(f'>> Loading model from {ckpt}') - pl_sd = torch.load(ckpt, map_location='cpu') - # if "global_step" in pl_sd: - # print(f"Global Step: {pl_sd['global_step']}") - sd = pl_sd['state_dict'] - model = instantiate_from_config(config.model) - m, u = model.load_state_dict(sd, strict=False) - model.to(self.device) - model.eval() - if self.full_precision: - print( - 'Using slower but more accurate full-precision math (--full_precision)' - ) - else: - print( - '>> Using half precision math. Call with --full_precision to use more accurate but VRAM-intensive full precision.' - ) - model.half() - return model - - def _load_img(self, path, width, height, fit=False): - with Image.open(path) as img: - image = img.convert('RGB') - print( - f'>> loaded input image of size {image.width}x{image.height} from {path}' - ) - - # The logic here is: - # 1. If "fit" is true, then the image will be fit into the bounding box defined - # by width and height. It will do this in a way that preserves the init image's - # aspect ratio while preventing letterboxing. This means that if there is - # leftover horizontal space after rescaling the image to fit in the bounding box, - # the generated image's width will be reduced to the rescaled init image's width. - # Similarly for the vertical space. - # 2. Otherwise, if "fit" is false, then the image will be scaled, preserving its - # aspect ratio, to the nearest multiple of 64. Large images may generate an - # unexpected OOM error. 
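The comment above spells out the two resize paths; a small worked example of the arithmetic (a sketch only, with numbers chosen to come out even):

```python
# fit=False ("squeeze"): each side is floored to a multiple of 64,
# which is what _resolution_check does.
w, h = 1000, 600
print(w - w % 64, h - h % 64)   # 960 576

# fit=True: the init image is scaled to sit inside the (width, height) box
# while keeping its aspect ratio; a 1024x768 init image fit into a 512x512
# box comes out 512x384, and generation then happens at 512x384.
```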
- if fit: - image = self._fit_image(image,(width,height)) - else: - image = self._squeeze_image(image) - image = np.array(image).astype(np.float32) / 255.0 - image = image[None].transpose(0, 3, 1, 2) - image = torch.from_numpy(image) - return 2.0 * image - 1.0 - - def _squeeze_image(self,image): - x,y,resize_needed = self._resolution_check(image.width,image.height) - if resize_needed: - return InitImageResizer(image).resize(x,y) - return image - - - def _fit_image(self,image,max_dimensions): - w,h = max_dimensions - print( - f'>> image will be resized to fit inside a box {w}x{h} in size.' - ) - if image.width > image.height: - h = None # by setting h to none, we tell InitImageResizer to fit into the width and calculate height - elif image.height > image.width: - w = None # ditto for w - else: - pass - image = InitImageResizer(image).resize(w,h) # note that InitImageResizer does the multiple of 64 truncation internally - print( - f'>> after adjusting image dimensions to be multiples of 64, init image is {image.width}x{image.height}' - ) - return image - - - # TO DO: Move this and related weighted subprompt code into its own module. - def _split_weighted_subprompts(text, skip_normalize=False): - """ - grabs all text up to the first occurrence of ':' - uses the grabbed text as a sub-prompt, and takes the value following ':' as weight - if ':' has no value defined, defaults to 1.0 - repeats until no text remaining - """ - prompt_parser = re.compile(""" - (?P # capture group for 'prompt' - (?:\\\:|[^:])+ # match one or more non ':' characters or escaped colons '\:' - ) # end 'prompt' - (?: # non-capture group - :+ # match one or more ':' characters - (?P # capture group for 'weight' - -?\d+(?:\.\d+)? # match positive or negative integer or decimal number - )? # end weight capture group, make optional - \s* # strip spaces after weight - | # OR - $ # else, if no ':' then match end of line - ) # end non-capture group - """, re.VERBOSE) - parsed_prompts = [(match.group("prompt").replace("\\:", ":"), float( - match.group("weight") or 1)) for match in re.finditer(prompt_parser, text)] - if skip_normalize: - return parsed_prompts - weight_sum = sum(map(lambda x: x[1], parsed_prompts)) - if weight_sum == 0: - print( - "Warning: Subprompt weights add up to zero. 
Discarding and using even weights instead.") - equal_weight = 1 / len(parsed_prompts) - return [(x[0], equal_weight) for x in parsed_prompts] - return [(x[0], x[1] / weight_sum) for x in parsed_prompts] - - # shows how the prompt is tokenized - # usually tokens have '' to indicate end-of-word, - # but for readability it has been replaced with ' ' - def _log_tokenization(self, text): - if not self.log_tokenization: - return - tokens = self.model.cond_stage_model.tokenizer._tokenize(text) - tokenized = "" - discarded = "" - usedTokens = 0 - totalTokens = len(tokens) - for i in range(0, totalTokens): - token = tokens[i].replace('', ' ') - # alternate color - s = (usedTokens % 6) + 1 - if i < self.model.cond_stage_model.max_length: - tokenized = tokenized + f"\x1b[0;3{s};40m{token}" - usedTokens += 1 - else: # over max token length - discarded = discarded + f"\x1b[0;3{s};40m{token}" - print(f"\nTokens ({usedTokens}):\n{tokenized}\x1b[0m") - if discarded != "": - print( - f"Tokens Discarded ({totalTokens-usedTokens}):\n{discarded}\x1b[0m") - - def _resolution_check(self, width, height, log=False): - resize_needed = False - w, h = map( - lambda x: x - x % 64, (width, height) - ) # resize to integer multiple of 64 - if h != height or w != width: - if log: - print( - f'>> Provided width and height must be multiples of 64. Auto-resizing to {w}x{h}' - ) - height = h - width = w - resize_needed = True - - if (width * height) > (self.width * self.height): - print(">> This input is larger than your defaults. If you run out of memory, please use a smaller image.") - - return width, height, resize_needed - - - def slerp(self, t, v0, v1, DOT_THRESHOLD=0.9995): - ''' - Spherical linear interpolation - Args: - t (float/np.ndarray): Float value between 0.0 and 1.0 - v0 (np.ndarray): Starting vector - v1 (np.ndarray): Final vector - DOT_THRESHOLD (float): Threshold for considering the two vectors as - colineal. Not recommended to alter this. - Returns: - v2 (np.ndarray): Interpolation vector between v0 and v1 - ''' - inputs_are_torch = False - if not isinstance(v0, np.ndarray): - inputs_are_torch = True - v0 = v0.detach().cpu().numpy() - if not isinstance(v1, np.ndarray): - inputs_are_torch = True - v1 = v1.detach().cpu().numpy() - - dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1))) - if np.abs(dot) > DOT_THRESHOLD: - v2 = (1 - t) * v0 + t * v1 - else: - theta_0 = np.arccos(dot) - sin_theta_0 = np.sin(theta_0) - theta_t = theta_0 * t - sin_theta_t = np.sin(theta_t) - s0 = np.sin(theta_0 - theta_t) / sin_theta_0 - s1 = sin_theta_t / sin_theta_0 - v2 = s0 * v0 + s1 * v1 - - if inputs_are_torch: - v2 = torch.from_numpy(v2).to(self.device) - - return v2 +class T2I(Generate): + def __init__(self,**kwargs): + print(f'>> The ldm.simplet2i module is deprecated. Use ldm.generate instead. 
It is a drop-in replacement.') + super().__init__(kwargs) diff --git a/Stable-Diffusion-local-Windows.ipynb b/notebooks/Stable-Diffusion-local-Windows.ipynb similarity index 92% rename from Stable-Diffusion-local-Windows.ipynb rename to notebooks/Stable-Diffusion-local-Windows.ipynb index f4cea1503d..1c5e90dcad 100644 --- a/Stable-Diffusion-local-Windows.ipynb +++ b/notebooks/Stable-Diffusion-local-Windows.ipynb @@ -65,25 +65,31 @@ "imageio-ffmpeg==0.4.2\n", "imageio==2.9.0\n", "kornia==0.6.0\n", + "# pip will resolve the version which matches torch\n", + "numpy\n", "omegaconf==2.1.1\n", "opencv-python==4.6.0.66\n", "pillow==9.2.0\n", + "pip>=22\n", "pudb==2019.2\n", "pytorch-lightning==1.4.2\n", "streamlit==1.12.0\n", - "# Regular \"taming-transformers\" doesn't seem to work\n", + "# \"CompVis/taming-transformers\" doesn't work\n", + "# ldm\\models\\autoencoder.py\", line 6, in \n", + "# from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer\n", + "# ModuleNotFoundError\n", "taming-transformers-rom1504==0.0.6\n", "test-tube>=0.7.5\n", "torch-fidelity==0.3.0\n", "torchmetrics==0.6.0\n", - "torchvision==0.12.0\n", "transformers==4.19.2\n", "git+https://github.com/openai/CLIP.git@main#egg=clip\n", "git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion\n", "# No CUDA in PyPi builds\n", - "torch@https://download.pytorch.org/whl/cu113/torch-1.11.0%2Bcu113-cp310-cp310-win_amd64.whl\n", - "# No MKL in PyPi builds (faster, more robust than OpenBLAS)\n", - "numpy@https://download.lfd.uci.edu/pythonlibs/archived/numpy-1.22.4+mkl-cp310-cp310-win_amd64.whl\n", + "--extra-index-url https://download.pytorch.org/whl/cu113 --trusted-host https://download.pytorch.org\n", + "torch==1.11.0\n", + "# Same as numpy - let pip do its thing\n", + "torchvision\n", "-e .\n" ] }, diff --git a/Stable_Diffusion_AI_Notebook.ipynb b/notebooks/Stable_Diffusion_AI_Notebook.ipynb similarity index 76% rename from Stable_Diffusion_AI_Notebook.ipynb rename to notebooks/Stable_Diffusion_AI_Notebook.ipynb index defc158346..3508a62efa 100644 --- a/Stable_Diffusion_AI_Notebook.ipynb +++ b/notebooks/Stable_Diffusion_AI_Notebook.ipynb @@ -3,7 +3,6 @@ "nbformat_minor": 0, "metadata": { "colab": { - "name": "Stable_Diffusion_AI_Notebook.ipynb", "provenance": [], "collapsed_sections": [], "private_outputs": true @@ -22,18 +21,18 @@ { "cell_type": "markdown", "source": [ - "# Stable Diffusion AI Notebook\n", + "# Stable Diffusion AI Notebook (Release 1.13)\n", "\n", "\"stable-diffusion-ai\"
\n", "#### Instructions:\n", "1. Execute each cell in order to mount a Dream bot and create images from text.
\n", - "2. Once cells 1-8 were run correctly you'll be executing a terminal in cell #9, you'll to enter `pipenv run scripts/dream.py` command to run Dream bot.
\n", + "2. Once cells 1-8 were run correctly you'll be executing a terminal in cell #9, you'll need to enter `python scripts/dream.py` command to run Dream bot.
\n", "3. After launching dream bot, you'll see:
`Dream > ` in terminal.
Insert a command, e.g. `Dream > Astronaut floating in a distant galaxy`, or type `-h` for help.\n",
- "3. After completion you'll see your generated images in path `stable-diffusion/outputs/img-samples/`, you can also display images in cell #10.\n",
+ "3. After completion you'll find your generated images in `stable-diffusion/outputs/img-samples/`; you can also display the most recently generated images in cell #10.\n",
"4. To quit the Dream bot, use the `q` command.
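The Dream bot driven by these steps is a thin prompt loop over the same `prompt2image()` entry point that `scripts/dream.py` now builds from `ldm.generate.Generate`. For reference, a minimal scripted equivalent — a sketch only, which assumes `Generate` accepts the same keyword arguments and default checkpoint/config paths that the old `T2I` class used:

```python
import os
from ldm.generate import Generate

# The paths below are the defaults from the old T2I constructor; adjust to your checkout.
g = Generate(
    weights='models/ldm/stable-diffusion-v1/model.ckpt',
    config='configs/stable-diffusion/v1-inference.yaml',
    sampler_name='k_lms',
)
g.load_model()  # load the checkpoint now rather than on the first prompt

os.makedirs('outputs/img-samples', exist_ok=True)

def save_image(image, seed):
    # image_callback receives a PIL Image and the seed that produced it
    image.save(f'outputs/img-samples/{seed}.png')

results = g.prompt2image(
    prompt='Astronaut floating in a distant galaxy',
    steps=50,
    cfg_scale=7.5,
    image_callback=save_image,
)
for image, seed in results:
    print(f'generated image with seed {seed}')
```

Inside Colab the interactive terminal route above is the intended workflow; the sketch is only meant to show what the Dream bot does under the hood.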
\n", "---\n", "Note: It takes some time to load, but after installing all dependencies you can use the bot all time you want while colab instance is up.
\n", - "Requirements: For this notebook to work you need to have [Stable-Diffusion-v-1-4](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original) stored in your Google Drive, it will be needed in cell #6\n", + "Requirements: For this notebook to work you need to have [Stable-Diffusion-v-1-4](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original) stored in your Google Drive, it will be needed in cell #7\n", "##### For more details visit Github repository: [lstein/stable-diffusion](https://github.com/lstein/stable-diffusion)\n", "---\n" ], @@ -41,6 +40,15 @@ "id": "ycYWcsEKc6w7" } }, + { + "cell_type": "markdown", + "source": [ + "## ◢ Installation" + ], + "metadata": { + "id": "dr32VLxlnouf" + } + }, { "cell_type": "code", "source": [ @@ -68,43 +76,28 @@ "from os.path import exists\n", "\n", "if exists(\"/content/stable-diffusion/\")==True:\n", + " %cd /content/stable-diffusion/\n", " print(\"Already downloaded repo\")\n", "else:\n", " !git clone --quiet https://github.com/lstein/stable-diffusion.git # Original repo\n", - " %cd stable-diffusion/\n", - " !git checkout --quiet tags/release-1.09\n", - " " + " %cd /content/stable-diffusion/\n", + " !git checkout --quiet tags/release-1.13" ] }, { "cell_type": "code", "source": [ - "#@title 3. Install Python 3.8 \n", - "%%capture --no-stderr\n", + "#@title 3. Install dependencies\n", "import gc\n", - "!apt-get -qq install python3.8\n", - "gc.collect()" - ], - "metadata": { - "id": "daHlozvwKesj", - "cellView": "form" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "#@title 4. Install dependencies from file in a VirtualEnv\n", - "#@markdown Be patient, it takes ~ 5 - 7min
\n", - "%%capture --no-stderr\n", - "#Virtual environment\n", - "!pip install pipenv -q\n", + "\n", + "if exists(\"/content/stable-diffusion/requirements-colab.txt\")==True:\n", + " %cd /content/stable-diffusion/\n", + " print(\"Already downloaded requirements file\")\n", + "else:\n", + " !wget https://raw.githubusercontent.com/lstein/stable-diffusion/development/requirements-colab.txt\n", "!pip install colab-xterm\n", - "%load_ext colabxterm\n", - "!pipenv --python 3.8\n", - "!pipenv install -r requirements.txt --skip-lock\n", - "gc.collect()\n" + "!pip install -r requirements-colab.txt\n", + "gc.collect()" ], "metadata": { "cellView": "form", @@ -116,7 +109,44 @@ { "cell_type": "code", "source": [ - "#@title 5. Mount google Drive\n", + "#@title 4. Load small ML models required\n", + "%cd /content/stable-diffusion/\n", + "!python scripts/preload_models.py\n", + "gc.collect()" + ], + "metadata": { + "cellView": "form", + "id": "ChIDWxLVHGGJ" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "#@title 5. Restart Runtime\n", + "exit()" + ], + "metadata": { + "cellView": "form", + "id": "8rSMhgnAttQa" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "## ◢ Configuration" + ], + "metadata": { + "id": "795x1tMoo8b1" + } + }, + { + "cell_type": "code", + "source": [ + "#@title 6. Mount google Drive\n", "from google.colab import drive\n", "drive.mount('/content/drive')" ], @@ -130,7 +160,7 @@ { "cell_type": "code", "source": [ - "#@title 6. Drive Path to model\n", + "#@title 7. Drive Path to model\n", "#@markdown Path should start with /content/drive/path-to-your-file
\n", "#@markdown Note: Model should be downloaded from https://huggingface.co
\n", "#@markdown Lastest release: [Stable-Diffusion-v-1-4](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original)\n", @@ -152,7 +182,7 @@ { "cell_type": "code", "source": [ - "#@title 7. Symlink to model\n", + "#@title 8. Symlink to model\n", "\n", "from os.path import exists\n", "import os \n", @@ -181,32 +211,27 @@ "outputs": [] }, { - "cell_type": "code", + "cell_type": "markdown", "source": [ - "#@title 8. Load small ML models required\n", - "%%capture --no-stderr\n", - "!pipenv run scripts/preload_models.py\n", - "gc.collect()" + "## ◢ Execution" ], "metadata": { - "cellView": "form", - "id": "ChIDWxLVHGGJ" - }, - "execution_count": null, - "outputs": [] + "id": "Mc28N0_NrCQH" + } }, { "cell_type": "code", "source": [ "#@title 9. Run Terminal and Execute Dream bot\n", "#@markdown Steps:
\n", - "#@markdown 1. Execute command `pipenv run scripts/dream.py` to run dream bot.
\n", + "#@markdown 1. Execute command `python scripts/dream.py` to run dream bot.
\n", "#@markdown 2. After initialized you'll see `Dream>` line.
\n", "#@markdown 3. Example text: `Astronaut floating in a distant galaxy`
\n", "#@markdown 4. To quit Dream bot use: `q` command.
\n", "\n", - "#Run from virtual env\n", - "\n", + "import gc\n", + "%cd /content/stable-diffusion/\n", + "%load_ext colabxterm\n", "%xterm\n", "gc.collect()" ], @@ -220,18 +245,18 @@ { "cell_type": "code", "source": [ - "#@title 10. Show generated images\n", - "\n", + "#@title 10. Show the last 15 generated images\n", + "import gc\n", "import glob\n", "import matplotlib.pyplot as plt\n", "import matplotlib.image as mpimg\n", "%matplotlib inline\n", "\n", "images = []\n", - "for img_path in glob.glob('/content/stable-diffusion/outputs/img-samples/*.png'):\n", + "for img_path in sorted(glob.glob('/content/stable-diffusion/outputs/img-samples/*.png'), reverse=True):\n", " images.append(mpimg.imread(img_path))\n", "\n", - "# Remove ticks and labels on x-axis and y-axis both\n", + "images = images[:15] \n", "\n", "plt.figure(figsize=(20,10))\n", "\n", @@ -253,4 +278,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/notebook_helpers.py b/notebooks/notebook_helpers.py similarity index 100% rename from notebook_helpers.py rename to notebooks/notebook_helpers.py diff --git a/requirements-colab.txt b/requirements-colab.txt new file mode 100644 index 0000000000..f9cc5600ea --- /dev/null +++ b/requirements-colab.txt @@ -0,0 +1,26 @@ +albumentations==0.4.3 +clean-fid==0.1.29 +einops==0.3.0 +huggingface-hub==0.8.1 +imageio-ffmpeg==0.4.2 +imageio==2.9.0 +kornia==0.6.0 +numpy==1.21.6 +omegaconf==2.1.1 +opencv-python==4.6.0.66 +pillow==9.2.0 +pip>=22 +pudb==2019.2 +pytorch-lightning==1.4.2 +streamlit==1.12.0 +taming-transformers-rom1504==0.0.6 +test-tube>=0.7.5 +torch-fidelity==0.3.0 +torchmetrics==0.6.0 +torchtext==0.6.0 +transformers==4.19.2 +torch==1.12.1+cu113 +torchvision==0.13.1+cu113 +git+https://github.com/openai/CLIP.git@main#egg=clip +git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion +-e . diff --git a/requirements-lin.txt b/requirements-lin.txt new file mode 100644 index 0000000000..3f22122eec --- /dev/null +++ b/requirements-lin.txt @@ -0,0 +1,33 @@ +albumentations==0.4.3 +einops==0.3.0 +huggingface-hub==0.8.1 +imageio-ffmpeg==0.4.2 +imageio==2.9.0 +kornia==0.6.0 +# pip will resolve the version which matches torch +numpy +omegaconf==2.1.1 +opencv-python==4.6.0.66 +pillow==9.2.0 +pip>=22 +pudb==2019.2 +pytorch-lightning==1.4.2 +streamlit==1.12.0 +# "CompVis/taming-transformers" doesn't work +# ldm\models\autoencoder.py", line 6, in +# from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer +# ModuleNotFoundError +taming-transformers-rom1504==0.0.6 +test-tube>=0.7.5 +torch-fidelity==0.3.0 +torchmetrics==0.6.0 +transformers==4.19.2 +git+https://github.com/openai/CLIP.git@main#egg=clip +git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion +git+https://github.com/lstein/GFPGAN@fix-dark-cast-images#egg=gfpgan +# No CUDA in PyPi builds +--extra-index-url https://download.pytorch.org/whl/cu113 --trusted-host https://download.pytorch.org +torch==1.11.0 +# Same as numpy - let pip do its thing +torchvision +-e . 
diff --git a/requirements.txt b/requirements-mac.txt similarity index 87% rename from requirements.txt rename to requirements-mac.txt index 8fc4f4fa7e..7296c84cc5 100644 --- a/requirements.txt +++ b/requirements-mac.txt @@ -11,7 +11,7 @@ opencv-python==4.6.0.66 pillow==9.2.0 pudb==2019.2 torch==1.12.1 -torchvision==0.12.0 +torchvision==0.13.0 pytorch-lightning==1.4.2 streamlit==1.12.0 test-tube>=0.7.5 @@ -21,3 +21,4 @@ transformers==4.19.2 -e git+https://github.com/openai/CLIP.git@main#egg=clip -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion +-e git+https://github.com/lstein/GFPGAN@fix-dark-cast-images#egg=gfpgan diff --git a/requirements-win.txt b/requirements-win.txt new file mode 100644 index 0000000000..3f22122eec --- /dev/null +++ b/requirements-win.txt @@ -0,0 +1,33 @@ +albumentations==0.4.3 +einops==0.3.0 +huggingface-hub==0.8.1 +imageio-ffmpeg==0.4.2 +imageio==2.9.0 +kornia==0.6.0 +# pip will resolve the version which matches torch +numpy +omegaconf==2.1.1 +opencv-python==4.6.0.66 +pillow==9.2.0 +pip>=22 +pudb==2019.2 +pytorch-lightning==1.4.2 +streamlit==1.12.0 +# "CompVis/taming-transformers" doesn't work +# ldm\models\autoencoder.py", line 6, in +# from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer +# ModuleNotFoundError +taming-transformers-rom1504==0.0.6 +test-tube>=0.7.5 +torch-fidelity==0.3.0 +torchmetrics==0.6.0 +transformers==4.19.2 +git+https://github.com/openai/CLIP.git@main#egg=clip +git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion +git+https://github.com/lstein/GFPGAN@fix-dark-cast-images#egg=gfpgan +# No CUDA in PyPi builds +--extra-index-url https://download.pytorch.org/whl/cu113 --trusted-host https://download.pytorch.org +torch==1.11.0 +# Same as numpy - let pip do its thing +torchvision +-e . diff --git a/scripts/dream.py b/scripts/dream.py index b1b9282ec0..891a448bf2 100755 --- a/scripts/dream.py +++ b/scripts/dream.py @@ -15,23 +15,29 @@ from ldm.dream.server import DreamServer, ThreadingDreamServer from ldm.dream.image_util import make_grid from omegaconf import OmegaConf +# Placeholder to be replaced with proper class that tracks the +# outputs and associates with the prompt that generated them. +# Just want to get the formatting look right for now. +output_cntr = 0 + + def main(): """Initialize command-line parsers and the diffusion model""" arg_parser = create_argv_parser() opt = arg_parser.parse_args() - + if opt.laion400m: print('--laion400m flag has been deprecated. Please use --model laion400m instead.') sys.exit(-1) if opt.weights != 'model': print('--weights argument has been deprecated. Please configure ./configs/models.yaml, and call it using --model instead.') sys.exit(-1) - + try: - models = OmegaConf.load(opt.config) - width = models[opt.model].width - height = models[opt.model].height - config = models[opt.model].config + models = OmegaConf.load(opt.config) + width = models[opt.model].width + height = models[opt.model].height + config = models[opt.model].config weights = models[opt.model].weights except (FileNotFoundError, IOError, KeyError) as e: print(f'{e}. 
Aborting.') @@ -40,7 +46,7 @@ def main(): print('* Initializing, be patient...\n') sys.path.append('.') from pytorch_lightning import logging - from ldm.simplet2i import T2I + from ldm.generate import Generate # these two lines prevent a horrible warning message from appearing # when the frozen CLIP tokenizer is imported @@ -52,18 +58,19 @@ def main(): # defaults passed on the command line. # additional parameters will be added (or overriden) during # the user input loop - t2i = T2I( + t2i = Generate( width=width, height=height, sampler_name=opt.sampler_name, weights=weights, full_precision=opt.full_precision, config=config, - grid = opt.grid, + grid=opt.grid, # this is solely for recreating the prompt - latent_diffusion_weights=opt.laion400m, + seamless=opt.seamless, embedding_path=opt.embedding_path, - device_type=opt.device + device_type=opt.device, + ignore_ctrl_c=opt.infile is None, ) # make sure the output directory exists @@ -87,12 +94,11 @@ def main(): print(f'{e}. Aborting.') sys.exit(-1) + if opt.seamless: + print(">> changed to seamless tiling mode") + # preload the model - tic = time.time() t2i.load_model() - print( - f'>> model loaded in', '%4.2fs' % (time.time() - tic) - ) if not infile: print( @@ -101,7 +107,7 @@ def main(): cmd_parser = create_cmd_parser() if opt.web: - dream_server_loop(t2i, opt.host, opt.port) + dream_server_loop(t2i, opt.host, opt.port, opt.outdir) else: main_loop(t2i, opt.outdir, opt.prompt_as_dir, cmd_parser, infile) @@ -109,8 +115,8 @@ def main(): def main_loop(t2i, outdir, prompt_as_dir, parser, infile): """prompt/read/execute loop""" done = False - last_seeds = [] path_filter = re.compile(r'[<>:"/\\|?*]') + last_results = list() # os.pathconf is not available on Windows if hasattr(os, 'pathconf'): @@ -125,7 +131,10 @@ def main_loop(t2i, outdir, prompt_as_dir, parser, infile): command = get_next_command(infile) except EOFError: done = True - break + continue + except KeyboardInterrupt: + done = True + continue # skip empty lines if not command.strip(): @@ -175,20 +184,32 @@ def main_loop(t2i, outdir, prompt_as_dir, parser, infile): if len(opt.prompt) == 0: print('Try again with a prompt!') continue + # retrieve previous value! + if opt.init_img is not None and re.match('^-\\d+$', opt.init_img): + try: + opt.init_img = last_results[int(opt.init_img)][0] + print(f'>> Reusing previous image {opt.init_img}') + except IndexError: + print( + f'>> No previous initial image at position {opt.init_img} found') + opt.init_img = None + continue + if opt.seed is not None and opt.seed < 0: # retrieve previous value! try: - opt.seed = last_seeds[opt.seed] - print(f'reusing previous seed {opt.seed}') + opt.seed = last_results[opt.seed][1] + print(f'>> Reusing previous seed {opt.seed}') except IndexError: - print(f'No previous seed at position {opt.seed} found') + print(f'>> No previous seed at position {opt.seed} found') opt.seed = None + continue - do_grid = opt.grid or t2i.grid + do_grid = opt.grid or t2i.grid if opt.with_variations is not None: # shotgun parsing, woo parts = [] - broken = False # python doesn't have labeled loops... + broken = False # python doesn't have labeled loops... 
for part in opt.with_variations.split(','): seed_and_weight = part.split(':') if len(seed_and_weight) != 2: @@ -223,7 +244,7 @@ def main_loop(t2i, outdir, prompt_as_dir, parser, infile): subdir = subdir[:(path_max - 27 - len(os.path.abspath(outdir)))] current_outdir = os.path.join(outdir, subdir) - print ('Writing files to directory: "' + current_outdir + '"') + print('Writing files to directory: "' + current_outdir + '"') # make sure the output directory exists if not os.path.exists(current_outdir): @@ -232,13 +253,15 @@ def main_loop(t2i, outdir, prompt_as_dir, parser, infile): current_outdir = outdir # Here is where the images are actually generated! + last_results = [] try: file_writer = PngWriter(current_outdir) prefix = file_writer.unique_prefix() - seeds = set() - results = [] # list of filename, prompt pairs - grid_images = dict() # seed -> Image, only used if `do_grid` + results = [] # list of filename, prompt pairs + grid_images = dict() # seed -> Image, only used if `do_grid` + def image_writer(image, seed, upscaled=False): + path = None if do_grid: grid_images[seed] = image else: @@ -247,44 +270,48 @@ def main_loop(t2i, outdir, prompt_as_dir, parser, infile): else: filename = f'{prefix}.{seed}.png' if opt.variation_amount > 0: - iter_opt = argparse.Namespace(**vars(opt)) # copy + iter_opt = argparse.Namespace(**vars(opt)) # copy this_variation = [[seed, opt.variation_amount]] if opt.with_variations is None: iter_opt.with_variations = this_variation else: iter_opt.with_variations = opt.with_variations + this_variation iter_opt.variation_amount = 0 - normalized_prompt = PromptFormatter(t2i, iter_opt).normalize_prompt() + normalized_prompt = PromptFormatter( + t2i, iter_opt).normalize_prompt() metadata_prompt = f'{normalized_prompt} -S{iter_opt.seed}' elif opt.with_variations is not None: - normalized_prompt = PromptFormatter(t2i, opt).normalize_prompt() - metadata_prompt = f'{normalized_prompt} -S{opt.seed}' # use the original seed - the per-iteration value is the last variation-seed + normalized_prompt = PromptFormatter( + t2i, opt).normalize_prompt() + # use the original seed - the per-iteration value is the last variation-seed + metadata_prompt = f'{normalized_prompt} -S{opt.seed}' else: - normalized_prompt = PromptFormatter(t2i, opt).normalize_prompt() + normalized_prompt = PromptFormatter( + t2i, opt).normalize_prompt() metadata_prompt = f'{normalized_prompt} -S{seed}' - path = file_writer.save_image_and_prompt_to_png(image, metadata_prompt, filename) + path = file_writer.save_image_and_prompt_to_png( + image, metadata_prompt, filename) if (not upscaled) or opt.save_original: # only append to results if we didn't overwrite an earlier output results.append([path, metadata_prompt]) - - seeds.add(seed) + last_results.append([path, seed]) t2i.prompt2image(image_callback=image_writer, **vars(opt)) if do_grid and len(grid_images) > 0: - grid_img = make_grid(list(grid_images.values())) - first_seed = next(iter(seeds)) + grid_img = make_grid(list(grid_images.values())) + grid_seeds = list(grid_images.keys()) + first_seed = last_results[0][1] filename = f'{prefix}.{first_seed}.png' # TODO better metadata for grid images - normalized_prompt = PromptFormatter(t2i, opt).normalize_prompt() - metadata_prompt = f'{normalized_prompt} -S{first_seed} --grid -N{len(grid_images)}' + normalized_prompt = PromptFormatter( + t2i, opt).normalize_prompt() + metadata_prompt = f'{normalized_prompt} -S{first_seed} --grid -n{len(grid_images)} # {grid_seeds}' path = 
file_writer.save_image_and_prompt_to_png( grid_img, metadata_prompt, filename ) results = [[path, metadata_prompt]] - last_seeds = list(seeds) - except AssertionError as e: print(e) continue @@ -296,11 +323,12 @@ def main_loop(t2i, outdir, prompt_as_dir, parser, infile): print('Outputs:') log_path = os.path.join(current_outdir, 'dream_log.txt') write_log_message(results, log_path) + print() print('goodbye!') -def get_next_command(infile=None) -> str: #command string +def get_next_command(infile=None) -> str: # command string if infile is None: command = input('dream> ') else: @@ -312,7 +340,8 @@ def get_next_command(infile=None) -> str: #command string print(f'#{command}') return command -def dream_server_loop(t2i, host, port): + +def dream_server_loop(t2i, host, port, outdir): print('\n* --web was specified, starting web server...') # Change working directory to the stable-diffusion directory os.chdir( @@ -321,10 +350,12 @@ def dream_server_loop(t2i, host, port): # Start server DreamServer.model = t2i + DreamServer.outdir = outdir dream_server = ThreadingDreamServer((host, port)) print(">> Started Stable Diffusion dream server!") if host == '0.0.0.0': - print(f"Point your browser at http://localhost:{port} or use the host's DNS name or IP address.") + print( + f"Point your browser at http://localhost:{port} or use the host's DNS name or IP address.") else: print(">> Default host address now 127.0.0.1 (localhost). Use --host 0.0.0.0 to bind any address.") print(f">> Point your browser at http://{host}:{port}.") @@ -339,14 +370,18 @@ def dream_server_loop(t2i, host, port): def write_log_message(results, log_path): """logs the name of the output image, prompt, and prompt args to the terminal and log file""" + global output_cntr log_lines = [f'{path}: {prompt}\n' for path, prompt in results] - print(*log_lines, sep='') + for l in log_lines: + output_cntr += 1 + print(f'[{output_cntr}] {l}',end='') + with open(log_path, 'a', encoding='utf-8') as file: file.writelines(log_lines) -SAMPLER_CHOICES=[ +SAMPLER_CHOICES = [ 'ddim', 'k_dpm_2_a', 'k_dpm_2', @@ -357,6 +392,7 @@ SAMPLER_CHOICES=[ 'plms', ] + def create_argv_parser(): parser = argparse.ArgumentParser( description="""Generate images using Stable Diffusion. @@ -418,6 +454,11 @@ def create_argv_parser(): default='outputs/img-samples', help='Directory to save generated images and a log of prompts and seeds. Default: outputs/img-samples', ) + parser.add_argument( + '--seamless', + action='store_true', + help='Change the model to seamless tiling (circular) mode', + ) parser.add_argument( '--embedding_path', type=str, @@ -434,7 +475,7 @@ def create_argv_parser(): '--gfpgan_bg_upsampler', type=str, default='realesrgan', - help='Background upsampler. Default: realesrgan. Options: realesrgan, none. Only used if --gfpgan is specified', + help='Background upsampler. Default: realesrgan. 
Options: realesrgan, none.', ) parser.add_argument( @@ -452,7 +493,7 @@ def create_argv_parser(): parser.add_argument( '--gfpgan_dir', type=str, - default='../GFPGAN', + default='./src/gfpgan', help='Indicates the directory containing the GFPGAN code.', ) parser.add_argument( @@ -492,8 +533,8 @@ def create_argv_parser(): ) parser.add_argument( '--config', - default ='configs/models.yaml', - help ='Path to configuration file for alternate models.', + default='configs/models.yaml', + help='Path to configuration file for alternate models.', ) return parser @@ -540,6 +581,11 @@ def create_cmd_parser(): default=None, help='Directory to save generated images and a log of prompts and seeds', ) + parser.add_argument( + '--seamless', + action='store_true', + help='Change the model to seamless tiling (circular) mode', + ) parser.add_argument( '-i', '--individual', @@ -552,6 +598,12 @@ def create_cmd_parser(): type=str, help='Path to input image for img2img mode (supersedes width and height)', ) + parser.add_argument( + '-M', + '--init_mask', + type=str, + help='Path to input mask for inpainting mode (supersedes width and height)', + ) parser.add_argument( '-T', '-fit', diff --git a/scripts/orig_scripts/txt2img.py b/scripts/orig_scripts/txt2img.py index 9f01bca021..6c43e73b93 100644 --- a/scripts/orig_scripts/txt2img.py +++ b/scripts/orig_scripts/txt2img.py @@ -232,7 +232,12 @@ def main(): print(f"reading prompts from {opt.from_file}") with open(opt.from_file, "r") as f: data = f.read().splitlines() - data = list(chunk(data, batch_size)) + if (len(data) >= batch_size): + data = list(chunk(data, batch_size)) + else: + while (len(data) < batch_size): + data.append(data[-1]) + data = [data] sample_path = os.path.join(outpath, "samples") os.makedirs(sample_path, exist_ok=True) @@ -264,7 +269,7 @@ def main(): prompts = list(prompts) c = model.get_learned_conditioning(prompts) shape = [opt.C, opt.H // opt.f, opt.W // opt.f] - + if not opt.klms: samples_ddim, _ = sampler.sample(S=opt.ddim_steps, conditioning=c, @@ -284,7 +289,7 @@ def main(): model_wrap_cfg = CFGDenoiser(model_wrap) extra_args = {'cond': c, 'uncond': uc, 'cond_scale': opt.scale} samples_ddim = K.sampling.sample_lms(model_wrap_cfg, x, sigmas, extra_args=extra_args) - + x_samples_ddim = model.decode_first_stage(samples_ddim) x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) diff --git a/static/dream_web/favicon.ico b/static/dream_web/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..51eb844a6a4a9d4b13e17e38b0fc915e7e97d4b5 GIT binary patch literal 1150 zcmaiy%TE(g6vi*n1a-yAr5H_2eSt+l!2}h8?$p@n=nPJTglL%pit>^TL`+1D5hx&N z)!<{Tc1e&lvO-)*Ow^TsgK$#zJKYFEA;2&@TN?6A5C9Q()1;lGF^Sd zF~GSouqjvv->jVh^vZ3gw#sUXZQHSqR>WSmwCOtUf;BK6W$k#wMKX$aiq1TKiY)i0 zVAh_I80S)!qiamC2k7>K9QPINuKnap%uv%}j+#E^Jur4AXDJpbkvT6Ctz07yN&)Z7 znrGHFe)vUp?-<1^k5RnhDB0a3h^>+{H77oj<%hM0acGw^T{k?>wWp=8-IJ2<;2zkW z55$XEACugh&R(wZ1^nba=DC(TD08@HP|IVZ?1<#7_S=$s)|_Dd@;ZI;mZvYT`CA{Y z_Vq(y{pYvZf8ANnKfH$f+a32rZ=N(I_xgGd_x}n~fRYte5_cZWQRBiY+1KuqaiB`D zuiiy$g`D(znbUIcklw#ZXiGqz&xFs Stable Diffusion Dream Server - + - - -
For news and support for this web service, visit our GitHub site

-
- - -
- -
- Postprocessing...1/3 +
+
+ + +
+ +
+ Postprocessing...1/3 +
+
+
+ +
+
+

No results...

-
-
-
-

No results...

-
-
+ diff --git a/static/dream_web/index.js b/static/dream_web/index.js index cbd66366f4..ac68034920 100644 --- a/static/dream_web/index.js +++ b/static/dream_web/index.js @@ -8,20 +8,44 @@ function toBase64(file) { } function appendOutput(src, seed, config) { - let outputNode = document.createElement("img"); - outputNode.src = src; + let outputNode = document.createElement("figure"); + + let variations = config.with_variations; + if (config.variation_amount > 0) { + variations = (variations ? variations + ',' : '') + seed + ':' + config.variation_amount; + } + let baseseed = (config.with_variations || config.variation_amount > 0) ? config.seed : seed; + let altText = baseseed + ' | ' + (variations ? variations + ' | ' : '') + config.prompt; - let altText = seed.toString() + " | " + config.prompt; - outputNode.alt = altText; - outputNode.title = altText; + // img needs width and height for lazy loading to work + const figureContents = ` + + ${altText} + +
${seed}
+ `; + + outputNode.innerHTML = figureContents; + let figcaption = outputNode.querySelector('figcaption'); // Reload image config - outputNode.addEventListener('click', () => { + figcaption.addEventListener('click', () => { let form = document.querySelector("#generate-form"); for (const [k, v] of new FormData(form)) { + if (k == 'initimg') { continue; } form.querySelector(`*[name=${k}]`).value = config[k]; } - document.querySelector("#seed").value = seed; + + document.querySelector("#seed").value = baseseed; + document.querySelector("#with_variations").value = variations || ''; + if (document.querySelector("#variation_amount").value <= 0) { + document.querySelector("#variation_amount").value = 0.2; + } saveFields(document.querySelector("#generate-form")); }); @@ -59,6 +83,7 @@ async function generateSubmit(form) { // Convert file data to base64 let formData = Object.fromEntries(new FormData(form)); + formData.initimg_name = formData.initimg.name formData.initimg = formData.initimg.name !== '' ? await toBase64(formData.initimg) : null; let strength = formData.strength; @@ -94,7 +119,6 @@ async function generateSubmit(form) { if (data.event === 'result') { noOutputs = false; - document.querySelector("#no-results-message")?.remove(); appendOutput(data.url, data.seed, data.config); progressEle.setAttribute('value', 0); progressEle.setAttribute('max', totalSteps); @@ -130,7 +154,25 @@ async function generateSubmit(form) { document.querySelector("#prompt").value = `Generating: "${prompt}"`; } -window.onload = () => { +async function fetchRunLog() { + try { + let response = await fetch('/run_log.json') + const data = await response.json(); + for(let item of data.run_log) { + appendOutput(item.url, item.seed, item); + } + } catch (e) { + console.error(e); + } +} + +window.onload = async () => { + document.querySelector("#prompt").addEventListener("keydown", (e) => { + if (e.key === "Enter" && !e.shiftKey) { + const form = e.target.form; + generateSubmit(form); + } + }); document.querySelector("#generate-form").addEventListener('submit', (e) => { e.preventDefault(); const form = e.target; @@ -147,6 +189,9 @@ window.onload = () => { document.querySelector("#reset-all").addEventListener('click', (e) => { clearFields(e.target.form); }); + document.querySelector("#remove-image").addEventListener('click', (e) => { + initimg.value=null; + }); loadFields(document.querySelector("#generate-form")); document.querySelector('#cancel-button').addEventListener('click', () => { @@ -154,8 +199,15 @@ window.onload = () => { console.error(e); }); }); + document.documentElement.addEventListener('keydown', (e) => { + if (e.key === "Escape") + fetch('/cancel').catch(err => { + console.error(err); + }); + }); if (!config.gfpgan_model_exists) { document.querySelector("#gfpgan").style.display = 'none'; } + await fetchRunLog() }; diff --git a/static/logo_temp.png b/static/logo_temp.png deleted file mode 100644 index f64d7d2dffaceb3d3e75275db831c18ee3421392..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35209 zcmbSy1yq$m+bseL0s>MZEe!&P?v(ECk_PDpX^<}I4rvcchcrq_3y6pAK7@3`9ll!s zz3acRE=!icnRniaXP(%5Po#>H^fNRfGz0{MXR;jWK0ZDOD?5apof))XcJ+2}Gx1_}aHaZp4-yuxW-c~PZZ?h% z6c0U`m^!+<2~mQf{&NoYPXFoF!S!G31Xc{%EKVEKb z2C=ZSu(xncvRKkxW&$6{~)pI5lLNqT^3 z{A)q}+t#ic-cA+}H49fqcNa4YNe?hgs)yM)@r%1yn7BE*XgE6B{d=cW{yk(0b`BOc z3VLlD2XjYHSBC%cfQ5vKn}rZ1*fn-$R$gWfRt+{*el9kCcCOc8`&e23*;LWd+{V)T zzir9`Hu!&P3T(#Q#LeXY+SuHT-_p^=-ULk8#@@ur0^;OgMM?3WW#kukv~zR;F9xII 
zYmbM31|QqGH#-FJXZ7@S1QsF~KQFt)C1u4-ZFbq!62x?AT~EHr^jYCbf7Hvs+J$%bnWyCN(wo+72OJK;*8nk|1fW>zIEYDIj&XI3`GyYi`}?Fm1b( zE>qTqzwzGLHCS%^{u1PJ25`TASc!)GY+c1|qF5VY#(~+sZLJL~sfNr0{4Jh>VA%Oo zlxvm;T!UI7q!EogwbM_k;N+xm`bRV~zKQ-{Hk85b~(q*3tcyI*wFyY3g z^Gtq{)4`(MR1cV9fhZy`fUn#VitfEADH2`uW|#PzR`6l*h#+cKd;vOTk)5gk=TvDCQkl4#WA&+8wk-GRRm*O( z=+0=%Z@9bfSr<>EPv>#>04tWcT)BW21k2Mj=vT>3+I^<|*Xkd8T-MYk+ zzNjQbjSE9XYPmUfg=uI+;?MuqR2^PUx-MY+;b6^cDo#HiZfT{4QKOfa7hq`6(KkF& zW$EFzH~EwB*yl)RDW|m5j^wDH{C}PJ*cf-gaB|?>ckkO0#Zle|a*5?Lw!LwkZFi|V zYg?{u*&DBP5O9ae7_q?X)2y)ZE!tScPga<4Jn znX$F=2>NZOMkraXrvqGHNm1e*{z%Q1!$r-BWd=A4_?gI+CX?NlL|#Zrb8+e@JydcP zqCn_6x_;;UR&pO9p5_bYpx_SF4!j$;+w0&v$eG?Zo`f1wZbBixSxR?1TGJI*zRdwY zN=5(%09H1$vzvw$Gd4iW*Z{V}Myc@rS`k1FOj+NX);CJomaO|WdhVa=7FF%M^ z7i*Q@`55~}R9{uUZL;M|5Fvj1`3IEl?ctSO@;13V3=BF0{(IUsZsahUk)>~c z6hhJNSQ*o^>_nG6KHu~Z8CecrT{r{N)AM}m2rl>F0>EKZ(}vRmDrM53#29KX7&`4f zv4eKHIT zrYt8+VqFJhyV^O7?!7{L$-jTFdo^m)W$n_PE-Ny|^4O1(fC!arnIHt$EL`Z#Z79zSNwttpKzD3AtgBvgYC z(g}Lcro+t6#z28NIyxHNzYg;zfP6U$WlX3fW7r%`@IPsvgYpl^A3C3l;@kocyuO2h z3Y;Aa3wjVDScW1qMf~$V+1D*?3vRAogC&Z`(&%@@&-cjQ1!eCF@yI&9n2UC57Efb*Yv+%0Vc_0=+az-1LZaUbY6Q;BW#l<(&?6WKVYGip{IKyh zAuB6JDe?E=rPW+h*YshZp&(=7Vmb_lUV=W3<9FcOd|qOoZ!M4gxS!>dwGexfs;hA9 zE4V(xbC~Hkbo=N%L4kVsko)%CIoM5NK5dbKNMePvrwFYgc-Ac}m@ap|<5;9SqMx68 z!Ugjn%~wH{(70US;rhB_x(KOuqD-D&PAtBVG|bX^sQA+gGdz?mrV9g13YqDCjq$gojdU=1)P4ex=>1F|6&jB zpJod0A=nZ)~s%R*Pm)&i6 zz~3?2V#+N@!!#I}>7U2B>ka*1RTZ!E()XjYwRn}!r6p~mp=X_$nNaa>3;@LDTL*Rr z^#MT?mVu8kFIks)C6`&26@;CzzxrCcPyOVD*{B^tzSEdQ2(XDi6U}LtG31|=)b+3C zLNhD~iMsra+8O5;P#KfMd0&bO<| zVTc|HTiV*@*4W`5hKb8~-|gy>N_Re)QA`$a^jIZuT?keAcahvMrSIX}tXh&~8E&EC zj>BG?hJr1CtC1ncUC$;VnP_I376pjqLF+`1)HludCkxbGeI{Mc^=a)%l4^eAgKarA72Pz)IZ& z6EG3bazcY-A$5>i@L;}qau1rBI?3e%FK4od>ytRp``=;B zZ8Bf5q)vrP({7o)u@97^kNfQ6?9?z?*0~+d8 z;G~KoI2%WqWvE>V5fW5U(b(vypv_^}q2wJs^%PDk+NV&umuvBUeVpl)riX5fGhnJV zwq(I5Cp}V7tej{pODBFMDoUa@`)V&pJ*{B>ABX{vJ1|8zu(^J=}kAcGJ z_ao?Banc;N8kBkQyS5L%E!|wgwJgsoQgzl@9=WVfj~mf-mqFLCsZPX>-@fV4oUtA4 zy?%9?a#2Kp^QG|0PaQj>y>!94g+0-Jdu=T&ROb|}ms+xZnK59X9Pk`oto`lM$Z0E-p6v8$7`2p!q^C*GZ7@F_8x>q zEELAV>{_E^NkR24o333g0Kw{y6Lq+s*~E^bycIGOd^x?OI^4pURXLR2-*8OsAV7ig z=kq%$dpJQ`4$#zr8`%E{5|Hspo4KDpakA;A&|pZbi7IIb{u_BAu(hU+i{#&G&q^0U z;`wH;CQw;PiPH6$V1)Ong+fyl{l*Sx>ItU!3{P38b|e|8VtCB{(jzkamBT_j@h=*H zkj(a7?A#*I`9j#~!te6%a4erb4imsCbX5K3T%t2X;j~APzeoH$At`C~oAAr(z<0S@ zUvk>UPcw#$La5~F$sXZ{k)XUG>6Dss;nm1)s)z$G*UI4;Rjrj-XreSTp+xI97VOD# zO;tj$SpcIGTkzk@=Bt$mMQm^MGcp_MVD~!*UW_ugI~HP_K$j}WueO~Iw|TwenJice zsT?sevFrW7@SyrCUILycMaG>D*zuY%(b38B6$^9jZ&W(5B8er-hhGV$fIb2}LHJ+p z(&h`|c{iPIg>HfXexpr{75Bhg9>ESMTn9)Cv$_FR@Isl=xZJ`xJYalF0{1$mZ*WEX$8v(+!HEGg;vc~uGg z`u=Z8c}`5O-w*aW-98^ABJ$~dQeJSA)3R+c`kM2@=(CCn*j_{iFd#m`cK!9$CjA!mWv;4&5f0Z^f+z=Tf z6cXmaY)98ZN|)j=5jQ9+B5f@+O*X48s7XZQXzm;!y#$F@f}W^D6w@jI0nx<{GsUkR zmoqa%%PKhws&^jxRG$54yyM|78bhglgOi6o6X=#C@5}ulSa`Mh<(AWEg+5x~iqnQK z#X-P$rVT+Nt^Wz^gvadcD}9BT8{5SC;Cwg|S9X4OkVWZV|FyTzJy5%KO>u}KO!914 zU1e;37F4QCSt(D!AcSjn4E>ctFF=j46x;C80>KmuUg0q5+^c_ozMk(J=n0Pu@ZJxk z{&yU*@Xoj8fi9*8_xB%6)c%0y5PLhgYzoHJf}AFb!hSj0SbC}$`^t9@xaK}Xn0Z}2(Rd9h zKJW#Je3Uo*qG$Ig>xj=pzuzgj`@`YhkZS8k5^S^P|3_b@4aGC`G9&?f0V0$C_~?#>;av|^aHP>gPXpWoPSsn; z|C8=eUPlTKFc921$$}w`nreWp+;3FzNnY@{G&fUeC%SLdy{VnCoNf3B#wnY|gAq+a zw6ZA$C8em?YqfFiAv5-V?(CPLP)p_(s~71uh0SONTwHjiyLn&uv38^1k)9xLgrrRarLK!m`MT0TVV^77VQ+skw z@ikSNhGY$}!z)$e%DFxJ!vgqH!0@Yx9N$`74xu)O-?--K=7tH)Mf9GXxnkB3KLns) zE}yXy<*L`S>o&f6>J0F^W$3XQ7q;BI{;b;(Nad8197?YEMjnc;M4E49N$_Z(reIr4wpHGSNNb%*VoOU-j( znnh(oxKYw!dGbGE%rR|LaclYqvdJ@^I=zi~&CyMCXD*W%Kyq{I?d<*CJ0`?D*mAfI z(>9&F@b;dME_x-pk$8Lx9W4u9ljW<6#%p9?fCBa!v4 
z+!Zh5dO5Hj!P+01lRT7UDmoBELnE)&kQ$LwME5$>m^J4O*>52*Ai&V-W?`>cml-#DPL+Dd?Xk;89!CRIVvKhk!L)dAe#74kh?}Xl+f@Sm_`vf!Tpz2u!nneH z0^Jk?o?nK$3EAQoo-u9w`e>GN)g*GKji%2O-wMM#2$^YERJYH=4PMs9Jx>PG&prw1YLOoZX)o_-mzh6W^k1uv1J(IL8U z=m80)&*9vkzdUbf&Ks6dp#clef7QjTCSjV`Ia@+*X+XrXw6o34{~l$^9mv`T6 zKkcRiO0B%-1IAq_LpsWJTH27~y|e}3%})k?%+yN^v5|KrIO`b*3pHL`8xB=BnxH%1 zWV811Ds|XZ8dc6&+1pzoo$m3hqp`9{kUJE^u!2aFDD#!B|iK#HL}R# zUeLD;_tLQBivuJG_rmoonKPR4EDa5rA%&hO0{OAZx+ty`o{|Q92Bs9*PgsW5LR1lW zSm@&h$@JlKwghRulfaA1q=IS-7X#~0|I;KN{g}JR)89OK*h)$OWISU|m)bD=D0jQc z)de@ACxy=nAFQsjyh3@DmFYeI;=_CJBu7IlNJ`aBM7$yFUYCQ_i?WnnoihCSgID*3 z1R*Lsw!nM^?IUu%Jgl^Xb}|aKtpAREQm3N2j9sc~$46&UPUg5rT_|h&r35rei_5r- zGOeq2{d1q_L#hTtSa{$b$wSxgIlZGjR0?vAT%(pQg{xAko!iHM`~I$fvmqVTDpt8A zP?~aVSfKE4zXzm4=w-X9ZNR2X?Q@2FM9j+B=@hc?xUvkC0_P1yVD(5^>&&rJx9Wj* zUO}%4Wj~CN6~xX_9{_g)Sq>L{MRT0vP5=#Vxa2`d2Zj5;QJ4)~y(L^I56CI1OIMax zkPlc+b4`KSGvIZ<=hE$(B;A9S_~;y>0ZICPq)JnlKGB23I-X_>fNJN4A`rz%t$J>^ zZozI*uZctn>=o|O6I@cYP)nFt3^Nj99(z6jNV<##)3C@XMcBlUs{z4>QMW--jl*!J z_)d4{w?WK^O&)r-T=q^xhB>s5#4Y)|SbRM_eSoqX)aL{)9@p<$$8hNc0L0>Ig$v z<&Y@3)HvjVQ2tzMJ<61Q`5{Zi(eW@;-{NRv@WX3W(;)aQNeQ?yU1!@-{5e*$c3G<+ zl5qkIM`BsFo1Ft518$9uLOb(bbGG2L&x`2`=*xp%dO0uN&3g#aG#q~C3NWgk8LXF% zPp|N_N|mO^-_mTtZ^U~@pfL$_xqtuuLDz_vKR{L59^})$d=oDOHZ)cuIzr6l{jDvY zgX(?1FQD9tZ=P=&{2HuX07t2xl=|JpWyKq9m1pwK+ZwQfgWGT0L=r1>#L{n*8jvZ zX;Cm#$k?3Zn_5ZcuVc3cIU1on4cuZd2Kuxx;8Hh;G>fvesWrlr=9!W;Ctg}EEWLV8!7Rw6zlx_Z`EAA& zj8j%NZ&u!&AI*$S%gV`d@y+lR;k>W+e)Zq$AD{_3vLiB9nGr>#lLIniq4q+aLTYyC zp!>mX4+7nxhsd)}407%3F3Ygip8M3PVHbo+QYiHgaJ|Ash2)12y9+%Sp#=HKXX3wg zS_gv!lGHC}^c)$ST^2VA9C)BVvamh9Ggv+9pPQ1+4(I}(1{=~iX2%n4i|oS0-3~Ux zg2S1VqPWG$87UKWIzYpKH-~Vt_hNKzh6oGK8HC}agqJ!xZ-=+(h$3Qp4MS99cPAJK z?om80Tv}$Ni2k%)Pkzb2SPs9P^RA7bpJ*KO!C#iLze6kT@bMwKW062WdKX%$*G!&4$(rJPid#l`|jANM~xCMcH_q zr8)5nynS(W$Z7y)*ptu~4{?-EwJjYt28tZ!I7LQ`ckOhH`q?jW^dm+)@XPR8 z(`OPA6TlGb>DP7#VfvDK_fGo1w-n{n$#UEB>UYd5XMg~zJMrqKD0`rFpl!#|^8 z0GzbGn?H@UBuN^7H)uXrs%P-lAda41PNH@WoNdM0weN>Yv9Xgq17epS5zgla$i+!= zKOPw!Rd#e7{j_hQiFyYaP8(m^Abu&UQP>ox$*e2ng|u4yP=-ZsMf4UBtRvbm;~|Ws zu#~<0s=8nXXk`Y?SF6-jq{bsDzqSc2VFm^CtUe-&D*y9UgO~5Aab@3GQ*)%nlkanY zQi4>yUrQE_=v4rU1(mqAr(oN!QnUe3I5ALRSpB$nnEJqXCmKK?ES$pqS}RW#llvuO zdfv`$tu43{SN@LT$3+XQ7*nH5@{4ZHR%&B=@?@pNeewBSWwBk}JAStT6`iT47PB=# zhS08m^Kev)u&gJIsHDvYdn+Zq=LIj(dPhDr7sQ??CZ`CZc6LzA>=fZKyPC*$2C~t# zji;?GB}{%!x5rGlD&W^+C8L1dJlRxjxgPE4{A^VPLbz}=>1npd?@G5J&Ke#O?s2(W z8_N);t}KG5G|nwwXfitzIrCov=;alj{pR#??VHE>Wn2{3E{*d*u}ow$kQRwG{Im8L zNqCS@N#zJM8HD9c$o0>MGHNXJnF_P`*i_44%5XNf7zX_po&Ut1u(9QrRDUGi;4oD0 zFb@xpz~T>O-C@Kg($E0!0-&Idy3_1Ie__gS2*RUFmG_?~Hvd6c*dgca%wte`5%-cg zxPMnVG0%wl*Tylu93<8?al?XB%9pTEpHItK4(b84h5cvQQY%DP@OT{l?L>&v^j;iq z<4KKHxUM55dOQF?l$sqbJ1k6R@ap82Us7N)Fv~aFTBs<0pE!#KU7zXbb{U-leh;NL zD_kYQ_udN5{q^+iBybcpI1~h!mSjyI81ABet7(HXNV|LW@SD*p`bxO~BH~XKV^R1+ zJ%<^fS6e$fpozon8XxY|o1U2XA6Xh19vTQ8cK0-~4a25rR5c!E4TnJAFj%{|lF=1W zKC=tA=>985%VMLU|NinuB@wYp#?d2&^Fl(ia*R==$puyd>B{NYcY)01Q~yVy!Q1Pg zZeiM{BU$V#(Cl)8A^L4)O%P@p1eYZI4+H4VdAl}2G{2ncF+$!J>oq3kNM0o-D>@qD@HBjB57Gwt7!BlC@f4cSA?^*~hXbG`eEWSo1tX5`k5BeEsenmT zJNF={HG-ohvb2K>LNnIhi;BFDw;Jvvw885c04zev)Mzzjou^|NaWI&i+?f+;n zwlE$wx7*D)S5cGuMt^+s2roKV`)`{!h0m|HgpWxC*dZ?y7Q473>*NJBmIVnhp~_RQ ze?p&gn(3nI!|)?|v~JFw@DEV|Xc#5SJG*&W zWg@a(v-FnXb^YL({0i@nwXsi>R#mT%ae$Y$`m+S}g^{i@6KNqXrPRP11%S|UX!^Fn zW(n^X%vFe2t4XMV_7uxiFkZ;2fI!{AenspSMY6KdR_%P6m>k=D#THvV3eS;h=WOpy z3$)>LL?9cT1Bh;@j|u13|1^jQ1k_Yl z0GEZ~r%JmHbnvL??&)1Bm=TO~W+f%bK}klukA$0>DY3BOW;vZm7sV z?dK&0NZQtaD5b@b?t&2(0w6vI%Xa}Q5Sx@_(Zv*P1QY=hUcg!EO7Q|7L510pa%RP_GoAk?Kd06jWT9e 
zx~9*kVMqNx-)c-a)-IOl=ZJsI-HksMSnvfrcY#{;&>H{H<{ANPu~7M&$)YNjcl~C& zlFQvt=$n=fSN;AAkBsDe70RB+5S7l_9>oMgLo0uLf^cWO5WZms~q=IOFT|v9jOn7SQp-Oofny zh`Ek@&wWJUafT@7b<%2Aj?P&%!zsGqbmBdqiV7ElTBdsYXD;)fP>6J3E5>*O0ONi4 zSiZtcogc)3(EGU+eE zL^JK{PVm_20Aub4dah0PBQN4JfatM$`rl(49Q00Lky3ww@`iiHfQQl~Jq8nge_Fu8 zkg%)X=FvkI=|X-Y?JnGJc6>JTk*xiKVkjt;ts&&7XMpL~YB72yT85J*0)k3q-zLj{ zrhl&`xw_9kbEd9TKb)R6$OW6RDPe(Y^qqE8|VJQxB$UO&C?*iyR_*vjnELz|ZORZBPnkQA_> zNt{jc%~vB4=_c8e0->iar8d=)MfqyNNnfCmSY-Aze^FU-dpCu|2$?(1O|TW|n^@06?hT?P;yuftdj&t#Z=a zjYJ|o+c)BgwkEXCDh0j44^4-Qf!Kb)#mzN3t(efDM1SyT7)He7$0V!X6Yyq#kT3KI zq-pU0ru8#+KT2xIaKIQ9E;(rWfw!^jAkaO4r8pINbq1XXSZhCQ6qF!pgL~wQdv)NL zfTvjsuU;?ng~xL@WAZGNi-knbgTTAzXC~I0jw%vU&*SKI*_qhbN^BMy9yTtBGMXEH zaLPaM5Dx&~aPR|izF&tP-HNyFYv4OvZo5l0koN`Ml=m6Aa>XNkd!(%6DOuIHtFs<4 zQp9267d)C@b{UzTmNZ%Y4^U>wcu0CvVC0ylbVFF%!a4o}&t;U4`Rl#Y7Ji`csk}^x zCNgCp?5jFQ2@nce*owEw?>uKB^;%hUxIS*WC|VaP zFO1C$6r|wgv(3-1Mwpmrq94%JY5$@d-ZW9@AWCG{3ol}ZjxU`}-Wcct_YvuK9^2-7 zCL%FHGx%i~2iuC|20n*Vo|_BqOOjLgnP&uvL2bW4j@r3k^MUU(R2+JA5GVRAfPN?0Etmfei0vb1k~H8$?|1Ac_divn75=-IxmMB6WM>_LyKXjCUY z4s0KcE95gJ#wg+0IfNUtZ+Mt`Mqhi4;(Y*V%y#!++H+jkR|q`Y`IJ)5l<*iS`oel? zqdnAlFw|&I<$vAM0;z|mA3AOVxcn-mYr5|L@(<_;jOVeMhaZpN%owK|VkVbvgSdnD zd<4oqMzW~r$Hu1xxLj?5*_0 zb#^PIZSDQ(-f}T-)In*lS`9NU#cDwweb1#|sa)#grPO*T-km5B@r+o#m0r%YqdV&C zI?zQZwcN0iy2HgJa=gIC9o0#uskIl)<#BE@j-{p+3e723)EUMd*a(#d7X#{QBMQ_P zr;H(c3}IZNmxF!$uh-%{keg7`ADmTAB0opcpmen`ig*s}CbvdPRJA)qtxvlXKU~KS zu!>mZ9hyY;Csi!OF0Q4*Yp@2;rJpfHmu`a+0efBreTblr>WWUjQ-ESUWlKYX1-Pk- zg%YFOrYZ_f3c1RvN9ANDBF*Vh5&O})l-d1(CY;B=%Sja8H|j&o|%YH>u_-EEwD2hN}Z#SD1?k)rIk943*q zoI$KAlm06aD~p;m-uC=16})?xWcowclxu36^y`h#D!wy%qAa6UtsUK=7JlJ)3Qxbc zDlPzqnp)Pb{S3UkcdLnAtB+kSuX7J0ecy34Z+tKtK361F$W`vaxbrL$@5d}N#+}1D z)v>EcL5qKLH`fdbzyJKs`TV(eHhbXFKyZK@L*J|`IgD8ZGNOwMv%!u0>Oaj2r*+My z;|-nb8nNq0fKu56zsQayD%<4ho-|oo5%MOEWH{u$cgI@#&sq@8$j`+#FfdMA@B6yV z{iHrP8W>lB8SB78aSF`vmz32AY z9upJuG0U8}?$htzOR{#Zp=(&MW1@&Ym`tq(Kskk5$LBJu+0R$Zumw_FvWO>Uh0Usa z%QfXTfARKdeJYPTdkBROE|`(G0{-FU8Rl_t;;%^Wg|WM=#8}X=y&i{L)p7 zlSDWIYtx<@l&s&t#uFC(NvGcFG#KDL{wSLRz~fw_eX@9AWfP4iX+pU;KbB`&@g;q(Y_BBV#0`$r35b1~>V)r|^ z-bd`IdDKl_cIpiXnU!j(2#?E>T4rWuPU9$6{_x`57q3yVR=>lhfqcg7>_Idn&AJH2 zM26+-jWj_sv-NqGai!o6Jc(>hJ{>!H)&Z&R5|0#|5km{n9OkhZt5>2ob4z=huq99K z*GisTllVT<7EuwRnU4M8*S~FA=6Uw@pRlmNP-@|Wja~uDu}5Zol*Zfd69y=HQ=_D_ z#9*u4o+`5dry@E-o6&ubA5z^# z9>j`{kBK&g8FmO|N6=?d}6z*q3f(em;(S z49BnAH1B>Kyatu0Uruaep5AaLTvNsW0YVP+5HaH+X&Rl^^|FJNvECwm=n8QA`l?*N zkF>S^NynMm)gSZSoW5UzFlD$HE(!V+2A#GED-jcZA9K}bi%{YLOgP6EN~8-gO7pf> zDC*rDVP}+s9wQE#2j>f`!U~T=Z`i~^?uGD|E~?=?Id$0j5>NAXrw}#(qQ}J|JMViR zT0pu!Fgy49_wU}`6Cp~GDhwKb$^S#0wz&US-_8voX>_fQ=b8g@;Oq-LN~mgTaWKVIMXS%{E2>A zpx;p#t9}y+QlrUldmc_V8Z$X#_@Yr?Jo5899zL_&>bbgTSmJaXTQjPE-*pC_lt=}R z#`M!go%Rg(c{y-tTmLM5G5LyKj*Kr${K%6(>((#SII@tdM{7Z55QPYZ$8GBO0bTT3 zN$n!24=bmaA+B>;<1VnzQE_qPl?Zi-?Jk0g&-%J56v483ust<8KF(t^RY~FvvLZ+* zo&S6bo#%H}5$pA|_3W#aLsXr#G*v>gY5o8gJto*bvuvtoXQtr5KKj{7GDI$UcA>7?t*Uj0QIbQwE+nFO%?GAA z7+&8AGJJP$_ulU>=K%#)^zm}w^(`qWk<3*E;R3%4kAR)it%pJN*E!@Cd6m>#)GKw( zScjWV&_&VR(rlXR@`Cdf#PaLvLN_)_=&0$3D{V2YP}!txa!GeBy?$*IiE zZF$k&`Y6&NXgJoGwal!U_HATL1|{XZ?-b?QxkzY`z{=w^rG0bQD}L*mXAB;%%%Sje z#9yai5ROh>jGCVt?OVMU^+AOr7%;G1{eI3EXt$CigXxvii%*osqyh4U_kv>+V0+F zZY}fQxi9n0n{G&pZ#wN2GE3l>b}@w@Cixgj2w-asjgPEa2eg}+?t9|F)(Cx!@tOt+*ZX_WE|iB(JCnoQt)32z}`o4^zje@fa6IG(>5g^-_q95fTfDy&o;E7 zR|rQOj{n($_%)F|`W|VRpZVKEdVQpbfSJ}{MVzyL<0`P*3{!7E$1ec>6*M(r$yJbM zQ-*cA;e;@l6VXL!JC7B!Z8-=aXvjqscjWLr&tm-#VGrLCR*r%|D2fU1ad{&_33RCporZ$EtW7TI!FfRs#&^oJ&-9vx$cv{Drg0D(BaRQLPvPW za+%y-Ljm5l>&6vawE}yNXK#Vh2Qy{EuFb5Yzx`zgf50l$QhNJjuJIFr36tNK 
Date: Mon, 12 Sep 2022 14:34:36 -0400
Subject: [PATCH 6/6] revert 49a96b90 due to conflicts during training

---
 ldm/modules/attention.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/ldm/modules/attention.py b/ldm/modules/attention.py
index 1321e9db1e..894c4db839 100644
--- a/ldm/modules/attention.py
+++ b/ldm/modules/attention.py
@@ -297,9 +297,9 @@ class BasicTransformerBlock(nn.Module):
     def _forward(self, x, context=None):
         x = x.contiguous() if x.device.type == 'mps' else x
-        x += self.attn1(self.norm1(x))
-        x += self.attn2(self.norm2(x), context=context)
-        x += self.ff(self.norm3(x))
+        x = self.attn1(self.norm1(x)) + x
+        x = self.attn2(self.norm2(x), context=context) + x
+        x = self.ff(self.norm3(x)) + x
         return x
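The commit message only cites "conflicts during training". As a hedged illustration (an assumption, since the patch does not spell out the root cause), the standalone sketch below shows how an in-place residual add can trip PyTorch autograd's saved-tensor version check, which the restored `x = f(x) + x` form avoids. The `nn.Linear` stand-in and tensor shapes are hypothetical and not code from this repository.

```python
import torch
import torch.nn as nn

torch.manual_seed(0)
layer = nn.Linear(4, 4)
x = torch.randn(2, 4)

# In-place residual add (the reverted form): the intermediate activation is
# overwritten, but autograd saved it to compute the Linear weight gradient,
# so backward() detects the version mismatch and raises a RuntimeError.
h = layer(x)
h += layer(h)
try:
    h.sum().backward()
except RuntimeError as err:
    print(f"in-place form fails during training: {err}")

# Out-of-place residual add (the form this patch restores): a new tensor is
# allocated, the saved activation stays untouched, and backward() succeeds.
h = layer(x)
h = layer(h) + h
h.sum().backward()
print("out-of-place form trains fine")
```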