Merge branch 'development' of https://github.com/lstein/stable-diffusion into development

Commit 7ff94383ce

.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file, 36 lines)
@@ -0,0 +1,36 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---

**Describe your environment**
- GPU: [cuda/amd/mps/cpu]
- VRAM: [if known]
- CPU arch: [x86/arm]
- OS: [Linux/Windows/macOS]
- Python: [Anaconda/miniconda/miniforge/pyenv/other (explain)]
- Branch: [if `git status` says anything other than "On branch main" paste it here]
- Commit: [run `git show` and paste the line that starts with "Merge" here]

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Additional context**
Add any other context about the problem here.

.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
@@ -21,9 +21,11 @@ How to (this hasn't been 100% tested yet):

 First get the weights checkpoint download started - it's big:

-Sign up at https://huggingface.co
-Accept the terms and click Access Repository: https://huggingface.co/CompVis/stable-diffusion-v-1-4-original
-Download sd-v1-4.ckpt (4.27 GB) and note where you have saved it (probably the Downloads folder)
+1. Sign up at https://huggingface.co
+2. Go to the [Stable Diffusion model page](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original)
+3. Accept the terms and click Access Repository:
+4. Download [sd-v1-4.ckpt (4.27 GB)](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/blob/main/sd-v1-4.ckpt) and note where you have saved it (probably the Downloads folder)

 While that is downloading, open Terminal and run the following commands one at a time.

 ```

@@ -47,7 +49,9 @@ cd stable-diffusion

 # create symlink to checkpoint
 mkdir -p models/ldm/stable-diffusion-v1/

 PATH_TO_CKPT="$HOME/Downloads" # or wherever you saved sd-v1-4.ckpt

 ln -s "$PATH_TO_CKPT/sd-v1-4.ckpt" models/ldm/stable-diffusion-v1/model.ckpt

 # install packages

@@ -316,3 +320,20 @@ something that depends on it-- Rosetta can translate some Intel instructions but
 not the specialized ones here. To avoid this, make sure to use the environment
 variable `CONDA_SUBDIR=osx-arm64`, which restricts the Conda environment to only
 use ARM packages, and use `nomkl` as described above.
+
+### input types 'tensor<2x1280xf32>' and 'tensor<*xf16>' are not broadcast compatible
+
+May appear when just starting to generate, e.g.:
+
+```
+dream> clouds
+Generating: 0%| | 0/1 [00:00<?, ?it/s]/Users/[...]/dev/stable-diffusion/ldm/modules/embedding_manager.py:152: UserWarning: The operator 'aten::nonzero' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/_temp/anaconda/conda-bld/pytorch_1662016319283/work/aten/src/ATen/mps/MPSFallback.mm:11.)
+  placeholder_idx = torch.where(
+loc("mps_add"("(mpsFileLoc): /AppleInternal/Library/BuildRoots/20d6c351-ee94-11ec-bcaf-7247572f23b4/Library/Caches/com.apple.xbs/Sources/MetalPerformanceShadersGraph/mpsgraph/MetalPerformanceShadersGraph/Core/Files/MPSGraphUtilities.mm":219:0)): error: input types 'tensor<2x1280xf32>' and 'tensor<*xf16>' are not broadcast compatible
+LLVM ERROR: Failed to infer result type(s).
+Abort trap: 6
+/Users/[...]/opt/anaconda3/envs/ldm/lib/python3.9/multiprocessing/resource_tracker.py:216: UserWarning: resource_tracker: There appear to be 1 leaked semaphore objects to clean up at shutdown
+  warnings.warn('resource_tracker: There appear to be %d '
+```
+
+Macs do not support autocast/mixed-precision. Supply `--full_precision` to use float32 everywhere.

README.md (88 changed lines)
@@ -18,12 +18,12 @@ text-to-image generator. This fork supports:
 1. An interactive command-line interface that accepts the same prompt
 and switches as the Discord bot.

-2. Support for img2img in which you provide a seed image to build on
-top of.
+2. A basic Web interface that allows you to run a local web server for
+generating images in your browser.

-3. A basic Web interface that allows you to run a local web server for
-generating images in your browser.
+3. Support for img2img in which you provide a seed image to guide the
+image creation. (inpainting & masking coming soon)

 4. A notebook for running the code on Google Colab.

 5. Upscaling and face fixing using the optional ESRGAN and GFPGAN
@@ -46,10 +46,14 @@ improvements and bug fixes.
 # Table of Contents

 1. [Major Features](#features)
-2. [Changelog](#latest)
+2. [Changelog](#latest-changes)
 3. [Installation](#installation)
+   1. [Linux](#linux)
+   1. [Windows](#windows)
+   1. [MacOS](README-Mac-MPS.md)
 4. [Troubleshooting](#troubleshooting)
-5. [Support](#support)
+5. [Contributing](#contributing)
+6. [Support](#support)

 # Features
@@ -134,6 +138,13 @@ You may also pass a -v<count> option to generate count variants on the original
 passing the first generated image back into img2img the requested number of times. It generates interesting
 variants.

+## Seamless Tiling
+
+The seamless tiling mode causes generated images to tile seamlessly with themselves. To use it, add the --seamless option when starting the script, which makes every generated image tile, or add it to an individual dream> prompt as shown here:
+
+```
+dream> "pond garden with lotus by claude monet" --seamless -s100 -n4
+```
+
 ## GFPGAN and Real-ESRGAN Support

 The script also provides the ability to do face restoration and
@@ -396,15 +407,22 @@ repository and associated paper for details and limitations.

 # Latest Changes

-- v1.13 (in process)
+- v1.14 (In progress)
+
+  - Add "seamless mode" for circular tiling of image. Generates beautiful effects. ([prixt](https://github.com/prixt))
+
+- v1.13 (3 September 2022)
+
+  - Support image variations (see [VARIATIONS](VARIATIONS.md)) ([Kevin Gibbons](https://github.com/bakkot) and many contributors and reviewers)
   - Supports a Google Colab notebook for a standalone server running on Google hardware [Arturo Mendivil](https://github.com/artmen1516)
   - WebUI supports GFPGAN/ESRGAN facial reconstruction and upscaling [Kevin Gibbons](https://github.com/bakkot)
   - WebUI supports incremental display of in-progress images during generation [Kevin Gibbons](https://github.com/bakkot)
-  - Output directory can be specified on the dream> command line.
-  - The grid was displaying duplicated images when not enough images to fill the final row [Muhammad Usama](https://github.com/SMUsamaShah)
+  - A new configuration file scheme that allows new models (including upcoming stable-diffusion-v1.5)
+    to be added without altering the code. ([David Wager](https://github.com/maddavid12))
   - Can specify --grid on dream.py command line as the default.
   - Miscellaneous internal bug and stability fixes.
+  - Works on M1 Apple hardware.
+  - Multiple bug fixes.

 For older changelogs, please visit **[CHANGELOGS](CHANGELOG.md)**.
@@ -420,10 +438,12 @@ There are separate installation walkthroughs for [Linux](#linux), [Windows](#win
 - Python (version 3.8.5 recommended; higher may work)
 - git

-2. Install the Python Anaconda environment manager using pip3.
+2. Install the Python Anaconda environment manager.

 ```
-~$ pip3 install anaconda
+~$ wget https://repo.anaconda.com/archive/Anaconda3-2022.05-Linux-x86_64.sh
+~$ chmod +x Anaconda3-2022.05-Linux-x86_64.sh
+~$ ./Anaconda3-2022.05-Linux-x86_64.sh
 ```

 After installing anaconda, you should log out of your system and log back in. If the installation
@@ -510,6 +530,30 @@ This will bring your local copy into sync with the remote one.

 ## Windows

+### Notebook install (semi-automated)
+
+We have a
+[Jupyter notebook](https://github.com/lstein/stable-diffusion/blob/main/Stable-Diffusion-local-Windows.ipynb)
+with cell-by-cell installation steps. It will download the code in this repo as
+one of the steps, so instead of cloning this repo, simply download the notebook
+from the link above and load it up in VSCode (with the
+appropriate extensions installed)/Jupyter/JupyterLab and start running the cells one-by-one.
+
+Note that you will need NVIDIA drivers, Python 3.10, and Git installed
+beforehand - simplified
+[step-by-step instructions](https://github.com/lstein/stable-diffusion/wiki/Easy-peasy-Windows-install)
+are available in the wiki (you'll only need steps 1, 2, & 3).
+
+### Manual installs
+
+#### pip
+
+See
+[Easy-peasy Windows install](https://github.com/lstein/stable-diffusion/wiki/Easy-peasy-Windows-install)
+in the wiki
+
+#### Conda
+
 1. Install Anaconda3 (miniconda3 version) from here: https://docs.anaconda.com/anaconda/install/windows/

 2. Install Git from here: https://git-scm.com/download/win
@@ -730,6 +774,20 @@ of branch>
 You will need to go through the install procedure again, but it should
 be fast because all the dependencies are already loaded.

+# Contributing
+
+Anyone who wishes to contribute to this project, whether
+documentation, features, bug fixes, code cleanup, testing, or code
+reviews, is very much encouraged to do so. If you are unfamiliar with
+how to contribute to GitHub projects, here is a [Getting Started
+Guide](https://opensource.com/article/19/7/create-pull-request-github).
+
+A full set of contribution guidelines, along with templates, are in
+progress, but for now the most important thing is to **make your pull
+request against the "development" branch**, and not against
+"main". This will help keep public breakage to a minimum and will
+allow you to propose more radical changes.
+
 # Support

 For support,
||||||
@ -742,8 +800,12 @@ _Contributions by:_
|
|||||||
[Peter Kowalczyk](https://github.com/slix), [Henry Harrison](https://github.com/hwharrison),
|
[Peter Kowalczyk](https://github.com/slix), [Henry Harrison](https://github.com/hwharrison),
|
||||||
[xraxra](https://github.com/xraxra), [bmaltais](https://github.com/bmaltais), [Sean McLellan](https://github.com/Oceanswave),
|
[xraxra](https://github.com/xraxra), [bmaltais](https://github.com/bmaltais), [Sean McLellan](https://github.com/Oceanswave),
|
||||||
[nicolai256](https://github.com/nicolai256), [Benjamin Warner](https://github.com/warner-benjamin),
|
[nicolai256](https://github.com/nicolai256), [Benjamin Warner](https://github.com/warner-benjamin),
|
||||||
[tildebyte](https://github.com/tildebyte),[yunsaki](https://github.com/yunsaki)
|
[tildebyte](https://github.com/tildebyte),[yunsaki](https://github.com/yunsaki), [James Reynolds][https://github.com/magnusviri],
|
||||||
and [Tesseract Cat](https://github.com/TesseractCat)
|
[Tesseract Cat](https://github.com/TesseractCat), and many more!
|
||||||
|
|
||||||
|
(If you have contributed and don't see your name on the list of
|
||||||
|
contributors, please let lstein know about the omission, or make a
|
||||||
|
pull request)
|
||||||
|
|
||||||
Original portions of the software are Copyright (c) 2020 Lincoln D. Stein (https://github.com/lstein)
|
Original portions of the software are Copyright (c) 2020 Lincoln D. Stein (https://github.com/lstein)
|
||||||
|
|
||||||
|

Stable-Diffusion-local-Windows.ipynb (new file, 265 lines)
@@ -0,0 +1,265 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Easy-peasy Windows install"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Note that you will need NVIDIA drivers, Python 3.10, and Git installed\n",
    "beforehand - simplified\n",
    "[step-by-step instructions](https://github.com/lstein/stable-diffusion/wiki/Easy-peasy-Windows-install)\n",
    "are available in the wiki (you'll only need steps 1, 2, & 3)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Run each cell in turn. In VSCode, either hit SHIFT-ENTER, or click on the little ▶️ to the left of the cell. In Jupyter/JupyterLab, you **must** hit SHIFT-ENTER"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%pip install pew"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%cmd\n",
    "git clone https://github.com/lstein/stable-diffusion.git"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%cd stable-diffusion"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile requirements.txt\n",
    "albumentations==0.4.3\n",
    "einops==0.3.0\n",
    "huggingface-hub==0.8.1\n",
    "imageio-ffmpeg==0.4.2\n",
    "imageio==2.9.0\n",
    "kornia==0.6.0\n",
    "# pip will resolve the version which matches torch\n",
    "numpy\n",
    "omegaconf==2.1.1\n",
    "opencv-python==4.6.0.66\n",
    "pillow==9.2.0\n",
    "pip>=22\n",
    "pudb==2019.2\n",
    "pytorch-lightning==1.4.2\n",
    "streamlit==1.12.0\n",
    "# \"CompVis/taming-transformers\" doesn't work\n",
    "# ldm\\models\\autoencoder.py\", line 6, in <module>\n",
    "# from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer\n",
    "# ModuleNotFoundError\n",
    "taming-transformers-rom1504==0.0.6\n",
    "test-tube>=0.7.5\n",
    "torch-fidelity==0.3.0\n",
    "torchmetrics==0.6.0\n",
    "transformers==4.19.2\n",
    "git+https://github.com/openai/CLIP.git@main#egg=clip\n",
    "git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion\n",
    "# No CUDA in PyPi builds\n",
    "--extra-index-url https://download.pytorch.org/whl/cu113 --trusted-host https://download.pytorch.org\n",
    "torch==1.11.0\n",
    "# Same as numpy - let pip do its thing\n",
    "torchvision\n",
    "-e .\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%cmd\n",
    "pew new --python 3.10 -r requirements.txt --dont-activate ldm"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Switch the notebook kernel to the new 'ldm' environment!\n",
    "\n",
    "## VSCode: restart VSCode and come back to this cell\n",
    "\n",
    "1. Ctrl+Shift+P\n",
    "1. Type \"Select Interpreter\" and select \"Jupyter: Select Interpreter to Start Jupyter Server\"\n",
    "1. VSCode will say that it needs to install packages. Click the \"Install\" button.\n",
    "1. Once the install is finished, do 1 & 2 again\n",
    "1. Pick 'ldm'\n",
    "1. Run the following cell"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%cd stable-diffusion"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "## Jupyter/JupyterLab\n",
    "\n",
    "1. Run the cell below\n",
    "1. Click on the toolbar where it says \"(ipykernel)\" ↗️. You should get a pop-up asking you to \"Select Kernel\". Pick 'ldm' from the drop-down.\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### DO NOT RUN THE FOLLOWING CELL IF YOU ARE USING VSCODE!!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# DO NOT RUN THIS CELL IF YOU ARE USING VSCODE!!\n",
    "%%cmd\n",
    "pew workon ldm\n",
    "pip3 install ipykernel\n",
    "python -m ipykernel install --name=ldm"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### When running the next cell, Jupyter/JupyterLab users might get a warning saying \"IProgress not found\". This can be ignored."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%run \"scripts/preload_models.py\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%cmd\n",
    "mkdir \"models/ldm/stable-diffusion-v1\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Now copy the SD model you downloaded from Hugging Face into the above new directory, and (if necessary) rename it to 'model.ckpt'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Now go create some magic!\n",
    "\n",
    "VSCode\n",
    "\n",
    "- The actual input box for the 'dream' prompt will appear at the very top of the VSCode window. Type in your commands and hit 'ENTER'.\n",
    "- To quit, hit the 'Interrupt' button in the toolbar up there ⬆️ a couple of times, then hit ENTER (you'll probably see a terrifying traceback from Python - just ignore it).\n",
    "\n",
    "Jupyter/JupyterLab\n",
    "\n",
    "- The input box for the 'dream' prompt will appear below. Type in your commands and hit 'ENTER'.\n",
    "- To quit, hit the interrupt button (⏹️) in the toolbar up there ⬆️ a couple of times, then hit ENTER (you'll probably see a terrifying traceback from Python - just ignore it)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%run \"scripts/dream.py\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Once this seems to be working well, you can try opening a terminal\n",
    "\n",
    "- VSCode: type ('CTRL+`')\n",
    "- Jupyter/JupyterLab: File|New Terminal\n",
    "- Or jump out of the notebook entirely, and open Powershell/Command Prompt\n",
    "\n",
    "Now:\n",
    "\n",
    "1. `cd` to wherever the 'stable-diffusion' directory is\n",
    "1. Run `pew workon ldm`\n",
    "1. Run `winpty python scripts\\dream.py`"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.10.6 ('ldm')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.6"
  },
  "vscode": {
   "interpreter": {
    "hash": "a05e4574567b7bc2c98f7f9aa579f9ea5b8739b54844ab610ac85881c4be2659"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}

@@ -48,12 +48,12 @@
 dream> "prompt" -n6 -S3357757885 -v0.2
 ...
 Outputs:
-./outputs/Xena/000002.784039624.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 784039624,0.2 -S3357757885
-./outputs/Xena/000002.3647897225.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225,0.2 -S3357757885
-./outputs/Xena/000002.917731034.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 917731034,0.2 -S3357757885
-./outputs/Xena/000002.4116285959.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 4116285959,0.2 -S3357757885
-./outputs/Xena/000002.1614299449.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 1614299449,0.2 -S3357757885
-./outputs/Xena/000002.1335553075.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 1335553075,0.2 -S3357757885
+./outputs/Xena/000002.784039624.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 784039624:0.2 -S3357757885
+./outputs/Xena/000002.3647897225.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.2 -S3357757885
+./outputs/Xena/000002.917731034.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 917731034:0.2 -S3357757885
+./outputs/Xena/000002.4116285959.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 4116285959:0.2 -S3357757885
+./outputs/Xena/000002.1614299449.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 1614299449:0.2 -S3357757885
+./outputs/Xena/000002.1335553075.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 1335553075:0.2 -S3357757885
 ~~~

 Note that the output for each image has a -V option giving the

@@ -78,7 +78,7 @@ this to work.
 ~~~
 dream> "prompt" -S3357757885 -V3647897225,0.1;1614299449,0.1
 Outputs:
-./outputs/Xena/000003.1614299449.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225,0.1;1614299449,0.1 -S3357757885
+./outputs/Xena/000003.1614299449.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1 -S3357757885
 ~~~

 Here we are providing equal weights (0.1 and 0.1) for both the

@@ -95,12 +95,12 @@ strength) options. Note that we use -n6 to generate 6 variations:
 ~~~~
 dream> "prompt" -S3357757885 -V3647897225,0.1;1614299449,0.1 -v0.05 -n6
 Outputs:
-./outputs/Xena/000004.3279757577.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225,0.1;1614299449,0.1;3279757577,0.05 -S3357757885
-./outputs/Xena/000004.2853129515.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225,0.1;1614299449,0.1;2853129515,0.05 -S3357757885
-./outputs/Xena/000004.3747154981.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225,0.1;1614299449,0.1;3747154981,0.05 -S3357757885
-./outputs/Xena/000004.2664260391.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225,0.1;1614299449,0.1;2664260391,0.05 -S3357757885
-./outputs/Xena/000004.1642517170.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225,0.1;1614299449,0.1;1642517170,0.05 -S3357757885
-./outputs/Xena/000004.2183375608.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225,0.1;1614299449,0.1;2183375608,0.05 -S3357757885
+./outputs/Xena/000004.3279757577.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,3279757577:0.05 -S3357757885
+./outputs/Xena/000004.2853129515.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,2853129515:0.05 -S3357757885
+./outputs/Xena/000004.3747154981.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,3747154981:0.05 -S3357757885
+./outputs/Xena/000004.2664260391.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,2664260391:0.05 -S3357757885
+./outputs/Xena/000004.1642517170.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,1642517170:0.05 -S3357757885
+./outputs/Xena/000004.2183375608.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,2183375608:0.05 -S3357757885
 ~~~~

 This produces six images, all slight variations on the combination of

@@ -108,6 +108,6 @@ the chosen two images. Here's the one I like best:

 <img src="static/variation_walkthru/000004.3747154981.png">

-As you can see, this is a very powerful too, which when combined with
+As you can see, this is a very powerful tool, which when combined with
 subprompt weighting, gives you great control over the content and
 quality of your generated images.
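
As an aside, the new `seed:weight` pair syntax written into these output lines is easy to post-process. The following is only a sketch using the standard library; `parse_variations` is an illustrative helper name, not something defined in the repository:

```python
# Parse a "-V" value in the new format, e.g. "3647897225:0.1,1614299449:0.1",
# into (seed, weight) tuples matching the outputs shown above.
def parse_variations(value: str) -> list[tuple[int, float]]:
    pairs = []
    for part in value.split(','):
        seed, _, weight = part.partition(':')
        pairs.append((int(seed), float(weight) if weight else 1.0))
    return pairs

print(parse_variations("3647897225:0.1,1614299449:0.1"))
# [(3647897225, 0.1), (1614299449, 0.1)]
```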

configs/models.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
# This file describes the alternative machine learning models
# available to the dream script.
#
# To add a new model, follow the examples below. Each
# model requires a model config file, a weights file,
# and the width and height of the images it
# was trained on.

laion400m:
    config: configs/latent-diffusion/txt2img-1p4B-eval.yaml
    weights: models/ldm/text2img-large/model.ckpt
    width: 256
    height: 256
stable-diffusion-1.4:
    config: configs/stable-diffusion/v1-inference.yaml
    weights: models/ldm/stable-diffusion-v1/model.ckpt
    width: 512
    height: 512
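
For illustration, a registry in this shape can be read with a few lines of Python. This is only a sketch of how a consumer might pick a model entry; it assumes PyYAML is installed and is not necessarily how the dream script itself loads the file:

```python
import yaml  # PyYAML, assumed available in the environment

# Load the model registry and select one entry by name.
with open('configs/models.yaml') as f:
    models = yaml.safe_load(f)

choice = models['stable-diffusion-1.4']
print(choice['config'])                   # configs/stable-diffusion/v1-inference.yaml
print(choice['weights'])                  # models/ldm/stable-diffusion-v1/model.ckpt
print(choice['width'], choice['height'])  # 512 512
```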

@@ -1,33 +1,29 @@
 name: ldm
 channels:
-  - pytorch-nightly
+  - pytorch
   - conda-forge
 dependencies:
-  - python==3.9.13
+  - python==3.10.5
   - pip==22.2.2

-  # pytorch-nightly, left unpinned
+  # pytorch left unpinned
   - pytorch
-  - torchmetrics
   - torchvision

   # I suggest to keep the other deps sorted for convenience.
-  # If you wish to upgrade to 3.10, try to run this:
+  # To determine what the latest versions should be, run:
   #
   # ```shell
-  # CONDA_CMD=conda
-  # sed -E 's/python==3.9.13/python==3.10.5/;s/ldm/ldm-3.10/;21,99s/- ([^=]+)==.+/- \1/' environment-mac.yaml > /tmp/environment-mac-updated.yml
-  # CONDA_SUBDIR=osx-arm64 $CONDA_CMD env create -f /tmp/environment-mac-updated.yml && $CONDA_CMD list -n ldm-3.10 | awk ' {print " - " $1 "==" $2;} '
+  # sed -E 's/ldm/ldm-updated/;20,99s/- ([^=]+)==.+/- \1/' environment-mac.yaml > environment-mac-updated.yml
+  # CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac-updated.yml && conda list -n ldm-updated | awk ' {print " - " $1 "==" $2;} '
   # ```
-  #
-  # Unfortunately, as of 2022-08-31, this fails at the pip stage.
   - albumentations==1.2.1
   - coloredlogs==15.0.1
   - einops==0.4.1
   - grpcio==1.46.4
-  - humanfriendly
+  - humanfriendly==10.0
-  - imageio-ffmpeg==0.4.7
   - imageio==2.21.2
+  - imageio-ffmpeg==0.4.7
   - imgaug==0.4.0
   - kornia==0.6.7
   - mpmath==1.2.1

@@ -43,13 +39,11 @@ dependencies:
   - streamlit==1.12.2
   - sympy==1.10.1
   - tensorboard==2.9.0
-  - transformers==4.21.2
+  - torchmetrics==0.9.3
   - pip:
-    - invisible-watermark
-    - test-tube
-    - tokenizers
-    - torch-fidelity
-    - -e git+https://github.com/huggingface/diffusers.git@v0.2.4#egg=diffusers
+    - test-tube==0.7.5
+    - transformers==4.21.2
+    - torch-fidelity==0.3.0
     - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
     - -e git+https://github.com/openai/CLIP.git@main#egg=clip
     - -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion

ldm/dream/conditioning.py (new file, 96 lines)
@@ -0,0 +1,96 @@
'''
This module handles the generation of the conditioning tensors, including management of
weighted subprompts.

Useful function exports:

get_uc_and_c()                  get the conditioned and unconditioned latent
split_weighted_subprompts()     split subprompts, normalize and weight them
log_tokenization()              print out colour-coded tokens and warn if truncated

'''
import re
import torch

def get_uc_and_c(prompt, model, log_tokens=False, skip_normalize=False):
    uc = model.get_learned_conditioning([''])

    # get weighted sub-prompts
    weighted_subprompts = split_weighted_subprompts(
        prompt, skip_normalize
    )

    if len(weighted_subprompts) > 1:
        # i dont know if this is correct.. but it works
        c = torch.zeros_like(uc)
        # normalize each "sub prompt" and add it
        for subprompt, weight in weighted_subprompts:
            log_tokenization(subprompt, model, log_tokens)
            c = torch.add(
                c,
                model.get_learned_conditioning([subprompt]),
                alpha=weight,
            )
    else:   # just standard 1 prompt
        log_tokenization(prompt, model, log_tokens)
        c = model.get_learned_conditioning([prompt])
    return (uc, c)

def split_weighted_subprompts(text, skip_normalize=False)->list:
    """
    grabs all text up to the first occurrence of ':'
    uses the grabbed text as a sub-prompt, and takes the value following ':' as weight
    if ':' has no value defined, defaults to 1.0
    repeats until no text remaining
    """
    prompt_parser = re.compile("""
        (?P<prompt>         # capture group for 'prompt'
        (?:\\\:|[^:])+      # match one or more non ':' characters or escaped colons '\:'
        )                   # end 'prompt'
        (?:                 # non-capture group
        :+                  # match one or more ':' characters
        (?P<weight>         # capture group for 'weight'
        -?\d+(?:\.\d+)?     # match positive or negative integer or decimal number
        )?                  # end weight capture group, make optional
        \s*                 # strip spaces after weight
        |                   # OR
        $                   # else, if no ':' then match end of line
        )                   # end non-capture group
        """, re.VERBOSE)
    parsed_prompts = [(match.group("prompt").replace("\\:", ":"), float(
        match.group("weight") or 1)) for match in re.finditer(prompt_parser, text)]
    if skip_normalize:
        return parsed_prompts
    weight_sum = sum(map(lambda x: x[1], parsed_prompts))
    if weight_sum == 0:
        print(
            "Warning: Subprompt weights add up to zero. Discarding and using even weights instead.")
        equal_weight = 1 / len(parsed_prompts)
        return [(x[0], equal_weight) for x in parsed_prompts]
    return [(x[0], x[1] / weight_sum) for x in parsed_prompts]

# shows how the prompt is tokenized
# usually tokens have '</w>' to indicate end-of-word,
# but for readability it has been replaced with ' '
def log_tokenization(text, model, log=False):
    if not log:
        return
    tokens = model.cond_stage_model.tokenizer._tokenize(text)
    tokenized = ""
    discarded = ""
    usedTokens = 0
    totalTokens = len(tokens)
    for i in range(0, totalTokens):
        token = tokens[i].replace('</w>', ' ')
        # alternate color
        s = (usedTokens % 6) + 1
        if i < model.cond_stage_model.max_length:
            tokenized = tokenized + f"\x1b[0;3{s};40m{token}"
            usedTokens += 1
        else:  # over max token length
            discarded = discarded + f"\x1b[0;3{s};40m{token}"
    print(f"\n>> Tokens ({usedTokens}):\n{tokenized}\x1b[0m")
    if discarded != "":
        print(
            f">> Tokens Discarded ({totalTokens-usedTokens}):\n{discarded}\x1b[0m"
        )
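
A quick usage sketch for the prompt splitter above, assuming the repository is on the Python path. The example prompts and the expected outputs are what the regex and the normalization logic described above should produce:

```python
from ldm.dream.conditioning import split_weighted_subprompts

# Two weighted subprompts; 0.75 and 0.25 already sum to 1.0,
# so normalization leaves the weights unchanged.
print(split_weighted_subprompts("tabby cat:0.75 white duck:0.25"))
# [('tabby cat', 0.75), ('white duck', 0.25)]

# No ':' anywhere: the whole prompt becomes one subprompt with weight 1.0.
print(split_weighted_subprompts("a surrealist landscape"))
# [('a surrealist landscape', 1.0)]
```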

@@ -1,4 +1,6 @@
 import torch
+from torch import autocast
+from contextlib import contextmanager, nullcontext

 def choose_torch_device() -> str:
     '''Convenience routine for guessing which GPU device to run model on'''

@@ -8,10 +10,11 @@ def choose_torch_device() -> str:
         return 'mps'
     return 'cpu'

-def choose_autocast_device(device) -> str:
+def choose_autocast_device(device):
     '''Returns an autocast compatible device from a torch device'''
     device_type = device.type # this returns 'mps' on M1
     # autocast only supports cuda or cpu
-    if device_type not in ('cuda','cpu'):
-        return 'cpu'
-    return device_type
+    if device_type in ('cuda','cpu'):
+        return device_type,autocast
+    else:
+        return 'cpu',nullcontext
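
The pair returned by the new `choose_autocast_device()` is meant to be unpacked into a device type plus a context-manager factory, which is how the generator classes further down use it. A minimal sketch, assuming torch and the repository are importable:

```python
import torch
from ldm.dream.devices import choose_torch_device, choose_autocast_device

device = torch.device(choose_torch_device())
device_type, scope = choose_autocast_device(device)

# On CUDA/CPU this enters torch.autocast; on MPS it falls back to a
# do-nothing nullcontext with device_type forced to 'cpu'.
with scope(device_type):
    x = torch.ones(2, 2, device=device) * 3.0
    print(device_type, x.dtype)
```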

ldm/dream/generator/__init__.py (new file, 4 lines)
@@ -0,0 +1,4 @@
'''
Initialization file for the ldm.dream.generator package
'''
from .base import Generator

ldm/dream/generator/base.py (new file, 158 lines)
@@ -0,0 +1,158 @@
'''
Base class for ldm.dream.generator.*
including img2img, txt2img, and inpaint
'''
import torch
import numpy as np
import random
from tqdm import tqdm, trange
from PIL import Image
from einops import rearrange, repeat
from pytorch_lightning import seed_everything
from ldm.dream.devices import choose_autocast_device

downsampling = 8

class Generator():
    def __init__(self, model):
        self.model               = model
        self.seed                = None
        self.latent_channels     = model.channels
        self.downsampling_factor = downsampling  # BUG: should come from model or config
        self.variation_amount    = 0
        self.with_variations     = []

    # this is going to be overridden in img2img.py, txt2img.py and inpaint.py
    def get_make_image(self, prompt, **kwargs):
        """
        Returns a function returning an image derived from the prompt and the initial image
        Return value depends on the seed at the time you call it
        """
        raise NotImplementedError("get_make_image() must be implemented in a descendent class")

    def set_variation(self, seed, variation_amount, with_variations):
        self.seed             = seed
        self.variation_amount = variation_amount
        self.with_variations  = with_variations

    def generate(self, prompt, init_image, width, height, iterations=1, seed=None,
                 image_callback=None, step_callback=None,
                 **kwargs):
        device_type, scope = choose_autocast_device(self.model.device)
        make_image = self.get_make_image(
            prompt,
            init_image    = init_image,
            width         = width,
            height        = height,
            step_callback = step_callback,
            **kwargs
        )

        results = []
        seed    = seed if seed else self.new_seed()
        seed, initial_noise = self.generate_initial_noise(seed, width, height)
        with scope(device_type), self.model.ema_scope():
            for n in trange(iterations, desc='Generating'):
                x_T = None
                if self.variation_amount > 0:
                    seed_everything(seed)
                    target_noise = self.get_noise(width, height)
                    x_T = self.slerp(self.variation_amount, initial_noise, target_noise)
                elif initial_noise is not None:
                    # i.e. we specified particular variations
                    x_T = initial_noise
                else:
                    seed_everything(seed)
                    if self.model.device.type == 'mps':
                        x_T = self.get_noise(width, height)

                # make_image will do the equivalent of get_noise itself
                image = make_image(x_T)
                results.append([image, seed])
                if image_callback is not None:
                    image_callback(image, seed)
                seed = self.new_seed()
        return results

    def sample_to_image(self, samples):
        """
        Decodes a batch of exactly one latent sample through the first stage
        and returns it as a PIL Image
        """
        x_samples = self.model.decode_first_stage(samples)
        x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
        if len(x_samples) != 1:
            raise Exception(
                f'>> expected to get a single image, but got {len(x_samples)}')
        x_sample = 255.0 * rearrange(
            x_samples[0].cpu().numpy(), 'c h w -> h w c'
        )
        return Image.fromarray(x_sample.astype(np.uint8))

    def generate_initial_noise(self, seed, width, height):
        initial_noise = None
        if self.variation_amount > 0 or len(self.with_variations) > 0:
            # use fixed initial noise plus random noise per iteration
            seed_everything(seed)
            initial_noise = self.get_noise(width, height)
            for v_seed, v_weight in self.with_variations:
                seed = v_seed
                seed_everything(seed)
                next_noise = self.get_noise(width, height)
                initial_noise = self.slerp(v_weight, initial_noise, next_noise)
            if self.variation_amount > 0:
                random.seed()  # reset RNG to an actually random state, so we can get a random seed for variations
                seed = random.randrange(0, np.iinfo(np.uint32).max)
            return (seed, initial_noise)
        else:
            return (seed, None)

    def get_noise(self, width, height):
        """
        Returns a tensor filled with random numbers, either from a normal distribution
        (txt2img) or shaped like the latent image (img2img, inpaint)
        """
        raise NotImplementedError("get_noise() must be implemented in a descendent class")

    def new_seed(self):
        self.seed = random.randrange(0, np.iinfo(np.uint32).max)
        return self.seed

    def slerp(self, t, v0, v1, DOT_THRESHOLD=0.9995):
        '''
        Spherical linear interpolation
        Args:
            t (float/np.ndarray): Float value between 0.0 and 1.0
            v0 (np.ndarray): Starting vector
            v1 (np.ndarray): Final vector
            DOT_THRESHOLD (float): Threshold for considering the two vectors as
                                   colinear. Not recommended to alter this.
        Returns:
            v2 (np.ndarray): Interpolation vector between v0 and v1
        '''
        inputs_are_torch = False
        if not isinstance(v0, np.ndarray):
            inputs_are_torch = True
            v0 = v0.detach().cpu().numpy()
        if not isinstance(v1, np.ndarray):
            inputs_are_torch = True
            v1 = v1.detach().cpu().numpy()

        dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
        if np.abs(dot) > DOT_THRESHOLD:
            v2 = (1 - t) * v0 + t * v1
        else:
            theta_0 = np.arccos(dot)
            sin_theta_0 = np.sin(theta_0)
            theta_t = theta_0 * t
            sin_theta_t = np.sin(theta_t)
            s0 = np.sin(theta_0 - theta_t) / sin_theta_0
            s1 = sin_theta_t / sin_theta_0
            v2 = s0 * v0 + s1 * v1

        if inputs_are_torch:
            v2 = torch.from_numpy(v2).to(self.model.device)

        return v2
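
To make the slerp behaviour concrete, here is a tiny numpy-only check. Because `slerp()` never touches `self` when both inputs are numpy arrays, it can be exercised without constructing a model; the two vectors below are arbitrary illustrations:

```python
import numpy as np
from ldm.dream.generator.base import Generator

v0 = np.array([1.0, 0.0])
v1 = np.array([0.0, 1.0])

# Halfway along the arc between two orthogonal unit vectors:
# both components come out as cos(45 degrees), about 0.7071, and the norm stays 1.
mid = Generator.slerp(None, 0.5, v0, v1)
print(mid, np.linalg.norm(mid))
```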

ldm/dream/generator/img2img.py (new file, 72 lines)
@@ -0,0 +1,72 @@
'''
ldm.dream.generator.img2img descends from ldm.dream.generator
'''

import torch
import numpy as np
from ldm.dream.devices import choose_autocast_device
from ldm.dream.generator.base import Generator
from ldm.models.diffusion.ddim import DDIMSampler

class Img2Img(Generator):
    def __init__(self, model):
        super().__init__(model)
        self.init_latent = None    # set by get_noise()

    @torch.no_grad()
    def get_make_image(self, prompt, sampler, steps, cfg_scale, ddim_eta,
                       conditioning, init_image, strength, step_callback=None, **kwargs):
        """
        Returns a function returning an image derived from the prompt and the initial image
        Return value depends on the seed at the time you call it.
        """

        # PLMS sampler not supported yet, so ignore previous sampler
        if not isinstance(sampler, DDIMSampler):
            print(
                f">> sampler '{sampler.__class__.__name__}' is not yet supported. Using DDIM sampler"
            )
            sampler = DDIMSampler(self.model, device=self.model.device)

        sampler.make_schedule(
            ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
        )

        device_type, scope = choose_autocast_device(self.model.device)
        with scope(device_type):
            self.init_latent = self.model.get_first_stage_encoding(
                self.model.encode_first_stage(init_image)
            )  # move to latent space

        t_enc = int(strength * steps)
        uc, c = conditioning

        @torch.no_grad()
        def make_image(x_T):
            # encode (scaled latent)
            z_enc = sampler.stochastic_encode(
                self.init_latent,
                torch.tensor([t_enc]).to(self.model.device),
                noise=x_T
            )
            # decode it
            samples = sampler.decode(
                z_enc,
                c,
                t_enc,
                img_callback = step_callback,
                unconditional_guidance_scale=cfg_scale,
                unconditional_conditioning=uc,
            )
            return self.sample_to_image(samples)

        return make_image

    def get_noise(self, width, height):
        device      = self.model.device
        init_latent = self.init_latent
        assert init_latent is not None, 'call to get_noise() when init_latent not set'
        if device.type == 'mps':
            return torch.randn_like(init_latent, device='cpu').to(device)
        else:
            return torch.randn_like(init_latent, device=device)
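
The `strength` argument above controls how far into the diffusion schedule the init image is pushed: only `t_enc = int(strength * steps)` denoising steps are actually run on the encoded latent. A small arithmetic sketch with illustrative values:

```python
# How many of the requested steps img2img actually runs, following
# t_enc = int(strength * steps) in get_make_image() above.
steps = 50
for strength in (0.3, 0.75, 0.99):
    t_enc = int(strength * steps)
    print(f"strength {strength}: {t_enc}/{steps} denoising steps")
# strength 0.3 -> 15/50, strength 0.75 -> 37/50, strength 0.99 -> 49/50
```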

ldm/dream/generator/inpaint.py (new file, 76 lines)
@@ -0,0 +1,76 @@
'''
ldm.dream.generator.inpaint descends from ldm.dream.generator
'''

import torch
import numpy as np
from einops import rearrange, repeat
from ldm.dream.devices import choose_autocast_device
from ldm.dream.generator.img2img import Img2Img
from ldm.models.diffusion.ddim import DDIMSampler

class Inpaint(Img2Img):
    def __init__(self, model):
        super().__init__(model)

    @torch.no_grad()
    def get_make_image(self, prompt, sampler, steps, cfg_scale, ddim_eta,
                       conditioning, init_image, init_mask, strength,
                       step_callback=None, **kwargs):
        """
        Returns a function returning an image derived from the prompt and
        the initial image + mask.  Return value depends on the seed at
        the time you call it.  kwargs are 'init_latent' and 'strength'
        """

        init_mask = init_mask[0][0].unsqueeze(0).repeat(4,1,1).unsqueeze(0)
        init_mask = repeat(init_mask, '1 ... -> b ...', b=1)

        # PLMS sampler not supported yet, so ignore previous sampler
        if not isinstance(sampler, DDIMSampler):
            print(
                f">> sampler '{sampler.__class__.__name__}' is not yet supported. Using DDIM sampler"
            )
            sampler = DDIMSampler(self.model, device=self.model.device)

        sampler.make_schedule(
            ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
        )

        device_type, scope = choose_autocast_device(self.model.device)
        with scope(device_type):
            self.init_latent = self.model.get_first_stage_encoding(
                self.model.encode_first_stage(init_image)
            )  # move to latent space

        t_enc = int(strength * steps)
        uc, c = conditioning

        print(f">> target t_enc is {t_enc} steps")

        @torch.no_grad()
        def make_image(x_T):
            # encode (scaled latent)
            z_enc = sampler.stochastic_encode(
                self.init_latent,
                torch.tensor([t_enc]).to(self.model.device),
                noise=x_T
            )

            # decode it
            samples = sampler.decode(
                z_enc,
                c,
                t_enc,
                img_callback                 = step_callback,
                unconditional_guidance_scale = cfg_scale,
                unconditional_conditioning   = uc,
                mask                         = init_mask,
                init_latent                  = self.init_latent
            )
            return self.sample_to_image(samples)

        return make_image
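
The mask-reshaping line at the top of `get_make_image()` broadcasts a single-channel mask across the four latent channels. A shape-only sketch with a dummy tensor (the 64x64 size is an assumption corresponding to a 512x512 image downsampled by 8):

```python
import torch

# Dummy single-channel mask in (batch, channel, height, width) layout.
init_mask = torch.ones(1, 1, 64, 64)

# Same transformation as in Inpaint.get_make_image():
# (64, 64) -> (1, 64, 64) -> (4, 64, 64) -> (1, 4, 64, 64)
m = init_mask[0][0].unsqueeze(0).repeat(4, 1, 1).unsqueeze(0)
print(m.shape)  # torch.Size([1, 4, 64, 64])
```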

ldm/dream/generator/txt2img.py (new file, 61 lines)
@@ -0,0 +1,61 @@
'''
ldm.dream.generator.txt2img inherits from ldm.dream.generator
'''

import torch
import numpy as np
from ldm.dream.generator.base import Generator

class Txt2Img(Generator):
    def __init__(self, model):
        super().__init__(model)

    @torch.no_grad()
    def get_make_image(self, prompt, sampler, steps, cfg_scale, ddim_eta,
                       conditioning, width, height, step_callback=None, **kwargs):
        """
        Returns a function returning an image derived from the prompt and the initial image
        Return value depends on the seed at the time you call it
        kwargs are 'width' and 'height'
        """
        uc, c = conditioning

        @torch.no_grad()
        def make_image(x_T):
            shape = [
                self.latent_channels,
                height // self.downsampling_factor,
                width  // self.downsampling_factor,
            ]
            samples, _ = sampler.sample(
                batch_size                   = 1,
                S                            = steps,
                x_T                          = x_T,
                conditioning                 = c,
                shape                        = shape,
                verbose                      = False,
                unconditional_guidance_scale = cfg_scale,
                unconditional_conditioning   = uc,
                eta                          = ddim_eta,
                img_callback                 = step_callback
            )
            return self.sample_to_image(samples)

        return make_image

    # returns a tensor filled with random numbers from a normal distribution
    def get_noise(self, width, height):
        device = self.model.device
        if device.type == 'mps':
            return torch.randn([1,
                                self.latent_channels,
                                height // self.downsampling_factor,
                                width  // self.downsampling_factor],
                               device='cpu').to(device)
        else:
            return torch.randn([1,
                                self.latent_channels,
                                height // self.downsampling_factor,
                                width  // self.downsampling_factor],
                               device=device)
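
The `shape` computed inside `make_image()` is the latent-space size: latent channels by image height and width divided by the downsampling factor of 8 defined in base.py. A sketch of the arithmetic, assuming the usual 512x512 output and the 4 latent channels of the Stable Diffusion v1 autoencoder:

```python
latent_channels     = 4    # model.channels for the SD v1 autoencoder (assumed default)
downsampling_factor = 8    # module-level `downsampling` in base.py
width, height = 512, 512

shape = [latent_channels, height // downsampling_factor, width // downsampling_factor]
print(shape)  # [4, 64, 64]
```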
@ -59,8 +59,16 @@ class PromptFormatter:
|
|||||||
switches.append(f'-H{opt.height or t2i.height}')
|
switches.append(f'-H{opt.height or t2i.height}')
|
||||||
switches.append(f'-C{opt.cfg_scale or t2i.cfg_scale}')
|
switches.append(f'-C{opt.cfg_scale or t2i.cfg_scale}')
|
||||||
switches.append(f'-A{opt.sampler_name or t2i.sampler_name}')
|
switches.append(f'-A{opt.sampler_name or t2i.sampler_name}')
|
||||||
|
# to do: put model name into the t2i object
|
||||||
|
# switches.append(f'--model{t2i.model_name}')
|
||||||
|
if opt.invert_mask:
|
||||||
|
switches.append(f'--invert_mask')
|
||||||
|
if opt.seamless or t2i.seamless:
|
||||||
|
switches.append(f'--seamless')
|
||||||
if opt.init_img:
|
if opt.init_img:
|
||||||
switches.append(f'-I{opt.init_img}')
|
switches.append(f'-I{opt.init_img}')
|
||||||
|
if opt.mask:
|
||||||
|
switches.append(f'-M{opt.mask}')
|
||||||
if opt.fit:
|
if opt.fit:
|
||||||
switches.append(f'--fit')
|
switches.append(f'--fit')
|
||||||
if opt.strength and opt.init_img is not None:
|
if opt.strength and opt.init_img is not None:
|
||||||
@ -70,10 +78,10 @@ class PromptFormatter:
|
|||||||
if opt.upscale:
|
if opt.upscale:
|
||||||
switches.append(f'-U {" ".join([str(u) for u in opt.upscale])}')
|
switches.append(f'-U {" ".join([str(u) for u in opt.upscale])}')
|
||||||
if opt.variation_amount > 0:
|
if opt.variation_amount > 0:
|
||||||
switches.append(f'-v {opt.variation_amount}')
|
switches.append(f'-v{opt.variation_amount}')
|
||||||
if opt.with_variations:
|
if opt.with_variations:
|
||||||
formatted_variations = ';'.join(f'{seed},{weight}' for seed, weight in opt.with_variations)
|
formatted_variations = ','.join(f'{seed}:{weight}' for seed, weight in opt.with_variations)
|
||||||
switches.append(f'-V {formatted_variations}')
|
switches.append(f'-V{formatted_variations}')
|
||||||
if t2i.full_precision:
|
if t2i.full_precision:
|
||||||
switches.append('-F')
|
switches.append('-F')
|
||||||
return ' '.join(switches)
|
return ' '.join(switches)
|
||||||
|
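For reference, the reformatted `-V` switch now pairs each variation seed with its weight as `seed:weight`, comma-separated. A quick illustrative check (not part of the commit):

with_variations = [(12345, 0.2), (67890, 0.5)]
formatted = ','.join(f'{seed}:{weight}' for seed, weight in with_variations)
print(f'-V{formatted}')   # -> -V12345:0.2,67890:0.5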
@@ -22,7 +22,7 @@ class Completer:
     def complete(self, text, state):
         buffer = readline.get_line_buffer()

-        if text.startswith(('-I', '--init_img')):
+        if text.startswith(('-I', '--init_img','-M','--init_mask')):
             return self._path_completions(text, state, ('.png','.jpg','.jpeg'))

         if buffer.strip().endswith('cd') or text.startswith(('.', '/')):
@@ -48,10 +48,15 @@ class Completer:

     def _path_completions(self, text, state, extensions):
         # get the path so far
+        # TODO: replace this mess with a regular expression match
         if text.startswith('-I'):
             path = text.replace('-I', '', 1).lstrip()
         elif text.startswith('--init_img='):
             path = text.replace('--init_img=', '', 1).lstrip()
+        elif text.startswith('--init_mask='):
+            path = text.replace('--init_mask=', '', 1).lstrip()
+        elif text.startswith('-M'):
+            path = text.replace('-M', '', 1).lstrip()
         else:
             path = text

@@ -94,6 +99,7 @@ if readline_available:
             '--grid','-g',
             '--individual','-i',
             '--init_img','-I',
+            '--init_mask','-M',
             '--strength','-f',
             '--variants','-v',
             '--outdir','-o',
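For context, the completer above is hooked into the interactive prompt through the standard readline API; a minimal sketch of the wiring (assuming a `Completer` instance built with the option list the script defines elsewhere — the constructor argument here is an assumption, not shown in this hunk):

import readline

completer = Completer(options)               # options: the switch list set up in the script
readline.set_completer(completer.complete)
readline.parse_and_bind('tab: complete')
# typing "-M out" followed by TAB would now offer .png/.jpg/.jpeg paths under ./out...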
@@ -11,6 +11,7 @@ class CanceledException(Exception):

 class DreamServer(BaseHTTPRequestHandler):
     model = None
+    outdir = None
     canceled = Event()

     def do_GET(self):
@@ -70,7 +71,8 @@ class DreamServer(BaseHTTPRequestHandler):
         steps = int(post_data['steps'])
         width = int(post_data['width'])
         height = int(post_data['height'])
         fit = 'fit' in post_data
+        seamless = 'seamless' in post_data
         cfgscale = float(post_data['cfgscale'])
         sampler_name = post_data['sampler']
         gfpgan_strength = float(post_data['gfpgan_strength']) if gfpgan_model_exists else 0
@@ -88,11 +90,11 @@ class DreamServer(BaseHTTPRequestHandler):
         # across images generated by each call to prompt2img(), so we define it in
         # the outer scope of image_done()
         config = post_data.copy() # Shallow copy
-        config['initimg'] = ''
+        config['initimg'] = config.pop('initimg_name','')

         images_generated = 0    # helps keep track of when upscaling is started
         images_upscaled  = 0    # helps keep track of when upscaling is completed
-        pngwriter = PngWriter("./outputs/img-samples/")
+        pngwriter = PngWriter(self.outdir)

         prefix = pngwriter.unique_prefix()
         # if upscaling is requested, then this will be called twice, once when
@@ -105,7 +107,7 @@ class DreamServer(BaseHTTPRequestHandler):

             # Append post_data to log, but only once!
             if not upscaled:
-                with open("./outputs/img-samples/dream_web_log.txt", "a") as log:
+                with open(os.path.join(self.outdir, "dream_web_log.txt"), "a") as log:
                     log.write(f"{path}: {json.dumps(config)}\n")

                 self.wfile.write(bytes(json.dumps(
@@ -133,7 +135,7 @@ class DreamServer(BaseHTTPRequestHandler):
                 {'event':action,'processed_file_cnt':f'{x}/{iterations}'}
             ) + '\n',"utf-8"))

-        step_writer = PngWriter('./outputs/intermediates/')
+        step_writer = PngWriter(os.path.join(self.outdir, "intermediates"))
         step_index = 1
         def image_progress(sample, step):
             if self.canceled.is_set():
@@ -144,7 +146,7 @@ class DreamServer(BaseHTTPRequestHandler):
             # and don't bother with the last one, since it'll render anyway
             nonlocal step_index
             if progress_images and step % 5 == 0 and step < steps - 1:
-                image = self.model._sample_to_image(sample)
+                image = self.model.sample_to_image(sample)
                 name = f'{prefix}.{seed}.{step_index}.png'
                 metadata = f'{prompt} -S{seed} [intermediate]'
                 path = step_writer.save_image_and_prompt_to_png(image, metadata, name)
@@ -166,6 +168,7 @@ class DreamServer(BaseHTTPRequestHandler):
             gfpgan_strength = gfpgan_strength,
             upscale         = upscale,
             sampler_name    = sampler_name,
+            seamless        = seamless,
             step_callback=image_progress,
             image_callback=image_done,
             threshold=threshold,
@@ -189,6 +192,7 @@ class DreamServer(BaseHTTPRequestHandler):
             width           = width,
             height          = height,
             fit             = fit,
+            seamless        = seamless,
             gfpgan_strength=gfpgan_strength,
             upscale         = upscale,
             step_callback=image_progress,
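A restatement of the progressive-preview rule used by `image_progress()` above, for readers skimming the hunk: every fifth step (but never the final one) is decoded and written under `<outdir>/intermediates` as `<prefix>.<seed>.<step_index>.png`. Illustrative only:

def wants_intermediate(step, steps, progress_images):
    # mirrors the condition used in image_progress() above
    return progress_images and step % 5 == 0 and step < steps - 1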
642  ldm/generate.py  Normal file
@@ -0,0 +1,642 @@
# Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)

# Derived from source code carrying the following copyrights
# Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich
# Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors

import torch
import numpy as np
import random
import os
import time
import re
import sys
import traceback
import transformers

from omegaconf import OmegaConf
from PIL import Image, ImageOps
from torch import nn
from pytorch_lightning import seed_everything

from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
from ldm.models.diffusion.ksampler import KSampler
from ldm.dream.pngwriter import PngWriter
from ldm.dream.image_util import InitImageResizer
from ldm.dream.devices import choose_torch_device
from ldm.dream.conditioning import get_uc_and_c

"""Simplified text to image API for stable diffusion/latent diffusion

Example Usage:

from ldm.generate import Generate

# Create an object with default values
gr = Generate(model       = <path>        // models/ldm/stable-diffusion-v1/model.ckpt
              config      = <path>        // configs/stable-diffusion/v1-inference.yaml
              iterations  = <integer>     // how many times to run the sampling (1)
              steps       = <integer>     // 50
              seed        = <integer>     // current system time
              sampler_name= ['ddim', 'k_dpm_2_a', 'k_dpm_2', 'k_euler_a', 'k_euler', 'k_heun', 'k_lms', 'plms']  // k_lms
              grid        = <boolean>     // false
              width       = <integer>     // image width, multiple of 64 (512)
              height      = <integer>     // image height, multiple of 64 (512)
              cfg_scale   = <float>       // condition-free guidance scale (7.5)
              )

# do the slow model initialization
gr.load_model()

# Do the fast inference & image generation. Any options passed here
# override the default values assigned during class initialization
# Will call load_model() if the model was not previously loaded and so
# may be slow at first.
# The method returns a list of images. Each row of the list is a sub-list of [filename,seed]
results = gr.prompt2png(prompt     = "an astronaut riding a horse",
                        outdir     = "./outputs/samples",
                        iterations = 3)

for row in results:
    print(f'filename={row[0]}')
    print(f'seed    ={row[1]}')

# Same thing, but using an initial image.
results = gr.prompt2png(prompt     = "an astronaut riding a horse",
                        outdir     = "./outputs/",
                        iterations = 3,
                        init_img   = "./sketches/horse+rider.png")

for row in results:
    print(f'filename={row[0]}')
    print(f'seed    ={row[1]}')

# Same thing, but we return a series of Image objects, which lets you manipulate them,
# combine them, and save them under arbitrary names

results = gr.prompt2image(prompt = "an astronaut riding a horse",
                          outdir = "./outputs/")
for row in results:
    im   = row[0]
    seed = row[1]
    im.save(f'./outputs/samples/an_astronaut_riding_a_horse-{seed}.png')
    im.thumbnail(100,100).save('./outputs/samples/astronaut_thumb.jpg')

Note that the old txt2img() and img2img() calls are deprecated but will
still work.
"""

class Generate:
    """Generate class
    Stores default values for multiple configuration items
    """

    def __init__(
            self,
            iterations     = 1,
            steps          = 50,
            cfg_scale      = 7.5,
            weights        = 'models/ldm/stable-diffusion-v1/model.ckpt',
            config         = 'configs/stable-diffusion/v1-inference.yaml',
            grid           = False,
            width          = 512,
            height         = 512,
            sampler_name   = 'k_lms',
            ddim_eta       = 0.0,   # deterministic
            precision      = 'autocast',
            full_precision = False,
            strength       = 0.75,  # default in scripts/img2img.py
            seamless       = False,
            embedding_path = None,
            device_type    = 'cuda',
    ):
        self.iterations     = iterations
        self.width          = width
        self.height         = height
        self.steps          = steps
        self.cfg_scale      = cfg_scale
        self.weights        = weights
        self.config         = config
        self.sampler_name   = sampler_name
        self.grid           = grid
        self.ddim_eta       = ddim_eta
        self.precision      = precision
        self.full_precision = True if choose_torch_device() == 'mps' else full_precision
        self.strength       = strength
        self.seamless       = seamless
        self.embedding_path = embedding_path
        self.device_type    = device_type
        self.model          = None   # empty for now
        self.sampler        = None
        self.device         = None
        self.generators     = {}
        self.base_generator = None
        self.seed           = None

        if device_type == 'cuda' and not torch.cuda.is_available():
            device_type = choose_torch_device()
            print(">> cuda not available, using device", device_type)
        self.device = torch.device(device_type)

        # for VRAM usage statistics
        device_type          = choose_torch_device()
        self.session_peakmem = torch.cuda.max_memory_allocated() if device_type == 'cuda' else None
        transformers.logging.set_verbosity_error()

    def prompt2png(self, prompt, outdir, **kwargs):
        """
        Takes a prompt and an output directory, writes out the requested number
        of PNG files, and returns an array of [[filename,seed],[filename,seed]...]
        Optional named arguments are the same as those passed to Generate and prompt2image()
        """
        results   = self.prompt2image(prompt, **kwargs)
        pngwriter = PngWriter(outdir)
        prefix    = pngwriter.unique_prefix()
        outputs   = []
        for image, seed in results:
            name = f'{prefix}.{seed}.png'
            path = pngwriter.save_image_and_prompt_to_png(
                image, f'{prompt} -S{seed}', name)
            outputs.append([path, seed])
        return outputs

    def txt2img(self, prompt, **kwargs):
        outdir = kwargs.pop('outdir', 'outputs/img-samples')
        return self.prompt2png(prompt, outdir, **kwargs)

    def img2img(self, prompt, **kwargs):
        outdir = kwargs.pop('outdir', 'outputs/img-samples')
        assert (
            'init_img' in kwargs
        ), 'call to img2img() must include the init_img argument'
        return self.prompt2png(prompt, outdir, **kwargs)

    def prompt2image(
            self,
            # these are common
            prompt,
            iterations       = None,
            steps            = None,
            seed             = None,
            cfg_scale        = None,
            ddim_eta         = None,
            skip_normalize   = False,
            image_callback   = None,
            step_callback    = None,
            width            = None,
            height           = None,
            sampler_name     = None,
            seamless         = False,
            log_tokenization = False,
            with_variations  = None,
            variation_amount = 0.0,
            # these are specific to img2img
            init_img         = None,
            mask             = None,
            invert_mask      = False,
            fit              = False,
            strength         = None,
            # these are specific to GFPGAN/ESRGAN
            gfpgan_strength  = 0,
            save_original    = False,
            upscale          = None,
            **args,
    ):   # eat up additional cruft
        """
        ldm.prompt2image() is the common entry point for txt2img() and img2img()
        It takes the following arguments:
           prompt                          // prompt string (no default)
           iterations                      // iterations (1); image count=iterations
           steps                           // refinement steps per iteration
           seed                            // seed for random number generator
           width                           // width of image, in multiples of 64 (512)
           height                          // height of image, in multiples of 64 (512)
           cfg_scale                       // how strongly the prompt influences the image (7.5) (must be >1)
           seamless                        // whether the generated image should tile
           init_img                        // path to an initial image
           mask                            // path to an initial image mask for inpainting
           invert_mask                     // paint over opaque areas, retain transparent areas
           strength                        // strength for noising/unnoising init_img. 0.0 preserves image exactly, 1.0 replaces it completely
           gfpgan_strength                 // strength for GFPGAN. 0.0 preserves image exactly, 1.0 replaces it completely
           ddim_eta                        // image randomness (eta=0.0 means the same seed always produces the same image)
           step_callback                   // a function or method that will be called each step
           image_callback                  // a function or method that will be called each time an image is generated
           with_variations                 // a weighted list [(seed_1, weight_1), (seed_2, weight_2), ...] of variations which should be applied before doing any generation
           variation_amount                // optional 0-1 value to slerp from -S noise to random noise (allows variations on an image)

        To use the step callback, define a function that receives two arguments:
        - Image GPU data
        - The step number

        To use the image callback, define a function or method that receives two arguments, an Image object
        and the seed. You can then do whatever you like with the image, including converting it to
        different formats and manipulating it. For example:

            def process_image(image,seed):
                image.save(f'images/{seed}.png')

        The callback used by prompt2png() can be found in ldm/dream_util.py. It contains code
        to create the requested output directory, select a unique informative name for each image, and
        write the prompt into the PNG metadata.
        """
        # TODO: convert this into a getattr() loop
        steps      = steps      or self.steps
        width      = width      or self.width
        height     = height     or self.height
        seamless   = seamless   or self.seamless
        cfg_scale  = cfg_scale  or self.cfg_scale
        ddim_eta   = ddim_eta   or self.ddim_eta
        iterations = iterations or self.iterations
        strength   = strength   or self.strength
        self.seed  = seed
        self.log_tokenization = log_tokenization
        with_variations = [] if with_variations is None else with_variations

        model = (
            self.load_model()
        )  # will instantiate the model or return it from cache

        for m in model.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                m.padding_mode = 'circular' if seamless else m._orig_padding_mode

        assert cfg_scale > 1.0, 'CFG_Scale (-C) must be >1.0'
        assert (
            0.0 < strength < 1.0
        ), 'img2img and inpaint strength can only work with 0.0 < strength < 1.0'
        assert (
            0.0 <= variation_amount <= 1.0
        ), '-v --variation_amount must be in [0.0, 1.0]'

        # check this logic - doesn't look right
        if len(with_variations) > 0 or variation_amount > 1.0:
            assert seed is not None,\
                'seed must be specified when using with_variations'
            if variation_amount == 0.0:
                assert iterations == 1,\
                    'when using --with_variations, multiple iterations are only possible when using --variation_amount'
            assert all(0 <= weight <= 1 for _, weight in with_variations),\
                f'variation weights must be in [0.0, 1.0]: got {[weight for _, weight in with_variations]}'

        width, height, _ = self._resolution_check(width, height, log=True)

        if sampler_name and (sampler_name != self.sampler_name):
            self.sampler_name = sampler_name
            self._set_sampler()

        tic = time.time()
        if torch.cuda.is_available():
            torch.cuda.reset_peak_memory_stats()

        results         = list()
        init_image      = None
        init_mask_image = None

        try:
            uc, c = get_uc_and_c(
                prompt, model=self.model,
                skip_normalize=skip_normalize,
                log_tokens=self.log_tokenization
            )

            if mask and not init_img:
                raise AssertionError('If mask path is provided, initial image path should be provided as well')

            if mask and init_img:
                init_image,size1 = self._load_img(init_img, width, height,fit=fit)
                init_image.to(self.device)
                init_mask_image,size2 = self._load_img_mask(mask, width, height,fit=fit, invert=invert_mask)
                init_mask_image.to(self.device)
                assert size1==size2,f"for inpainting, the initial image and its mask must be identical sizes, instead got {size1} vs {size2}"
                generator = self._make_inpaint()
            elif init_img:      # little bit of repeated code here, but makes logic clearer
                init_image,_ = self._load_img(init_img, width, height, fit=fit)
                init_image.to(self.device)
                generator = self._make_img2img()
            else:
                generator = self._make_txt2img()

            generator.set_variation(self.seed, variation_amount, with_variations)
            results = generator.generate(
                prompt,
                iterations     = iterations,
                seed           = self.seed,
                sampler        = self.sampler,
                steps          = steps,
                cfg_scale      = cfg_scale,
                conditioning   = (uc,c),
                ddim_eta       = ddim_eta,
                image_callback = image_callback,  # called after the final image is generated
                step_callback  = step_callback,   # called after each intermediate image is generated
                width          = width,
                height         = height,
                init_image     = init_image,      # notice that init_image is different from init_img
                init_mask      = init_mask_image,
                strength       = strength
            )

            if upscale is not None or gfpgan_strength > 0:
                self.upscale_and_reconstruct(results,
                                             upscale        = upscale,
                                             strength       = gfpgan_strength,
                                             save_original  = save_original,
                                             image_callback = image_callback)

        except KeyboardInterrupt:
            print('*interrupted*')
            print(
                '>> Partial results will be returned; if --grid was requested, nothing will be returned.'
            )
        except RuntimeError as e:
            print(traceback.format_exc(), file=sys.stderr)
            print('>> Are you sure your system has an adequate GPU?')

        toc = time.time()
        print('>> Usage stats:')
        print(
            f'>>   {len(results)} image(s) generated in', '%4.2fs' % (toc - tic)
        )
        print(
            f'>>   Max VRAM used for this generation:',
            '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9),
        )

        if self.session_peakmem:
            self.session_peakmem = max(
                self.session_peakmem, torch.cuda.max_memory_allocated()
            )
            print(
                f'>>   Max VRAM used since script start: ',
                '%4.2fG' % (self.session_peakmem / 1e9),
            )
        return results

    def _make_img2img(self):
        if not self.generators.get('img2img'):
            from ldm.dream.generator.img2img import Img2Img
            self.generators['img2img'] = Img2Img(self.model)
        return self.generators['img2img']

    def _make_txt2img(self):
        if not self.generators.get('txt2img'):
            from ldm.dream.generator.txt2img import Txt2Img
            self.generators['txt2img'] = Txt2Img(self.model)
        return self.generators['txt2img']

    def _make_inpaint(self):
        if not self.generators.get('inpaint'):
            from ldm.dream.generator.inpaint import Inpaint
            self.generators['inpaint'] = Inpaint(self.model)
        return self.generators['inpaint']

    def load_model(self):
        """Load and initialize the model from configuration variables passed at object creation time"""
        if self.model is None:
            seed_everything(random.randrange(0, np.iinfo(np.uint32).max))
            try:
                config = OmegaConf.load(self.config)
                model = self._load_model_from_config(config, self.weights)
                if self.embedding_path is not None:
                    model.embedding_manager.load(
                        self.embedding_path, self.full_precision
                    )
                self.model = model.to(self.device)
                # model.to doesn't change the cond_stage_model.device used to move the tokenizer output, so set it here
                self.model.cond_stage_model.device = self.device
            except AttributeError as e:
                print(f'>> Error loading model. {str(e)}', file=sys.stderr)
                print(traceback.format_exc(), file=sys.stderr)
                raise SystemExit from e

            self._set_sampler()

            for m in self.model.modules():
                if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                    m._orig_padding_mode = m.padding_mode

        return self.model

    def upscale_and_reconstruct(self,
                                image_list,
                                upscale        = None,
                                strength       = 0.0,
                                save_original  = False,
                                image_callback = None):
        try:
            if upscale is not None:
                from ldm.gfpgan.gfpgan_tools import real_esrgan_upscale
            if strength > 0:
                from ldm.gfpgan.gfpgan_tools import run_gfpgan
        except (ModuleNotFoundError, ImportError):
            print(traceback.format_exc(), file=sys.stderr)
            print('>> You may need to install the ESRGAN and/or GFPGAN modules')
            return

        for r in image_list:
            image, seed = r
            try:
                if upscale is not None:
                    if len(upscale) < 2:
                        upscale.append(0.75)
                    image = real_esrgan_upscale(
                        image,
                        upscale[1],
                        int(upscale[0]),
                        seed,
                    )
                if strength > 0:
                    image = run_gfpgan(
                        image, strength, seed, 1
                    )
            except Exception as e:
                print(
                    f'>> Error running RealESRGAN or GFPGAN. Your image was not upscaled.\n{e}'
                )

            if image_callback is not None:
                image_callback(image, seed, upscaled=True)
            else:
                r[0] = image

    # to help WebGUI - front end to generator util function
    def sample_to_image(self,samples):
        return self._sample_to_image(samples)

    def _sample_to_image(self,samples):
        if not self.base_generator:
            from ldm.dream.generator import Generator
            self.base_generator = Generator(self.model)
        return self.base_generator.sample_to_image(samples)

    def _set_sampler(self):
        msg = f'>> Setting Sampler to {self.sampler_name}'
        if self.sampler_name == 'plms':
            self.sampler = PLMSSampler(self.model, device=self.device)
        elif self.sampler_name == 'ddim':
            self.sampler = DDIMSampler(self.model, device=self.device)
        elif self.sampler_name == 'k_dpm_2_a':
            self.sampler = KSampler(
                self.model, 'dpm_2_ancestral', device=self.device
            )
        elif self.sampler_name == 'k_dpm_2':
            self.sampler = KSampler(self.model, 'dpm_2', device=self.device)
        elif self.sampler_name == 'k_euler_a':
            self.sampler = KSampler(
                self.model, 'euler_ancestral', device=self.device
            )
        elif self.sampler_name == 'k_euler':
            self.sampler = KSampler(self.model, 'euler', device=self.device)
        elif self.sampler_name == 'k_heun':
            self.sampler = KSampler(self.model, 'heun', device=self.device)
        elif self.sampler_name == 'k_lms':
            self.sampler = KSampler(self.model, 'lms', device=self.device)
        else:
            msg = f'>> Unsupported Sampler: {self.sampler_name}, Defaulting to plms'
            self.sampler = PLMSSampler(self.model, device=self.device)

        print(msg)

    def _load_model_from_config(self, config, ckpt):
        print(f'>> Loading model from {ckpt}')
        pl_sd = torch.load(ckpt, map_location='cpu')
        sd = pl_sd['state_dict']
        model = instantiate_from_config(config.model)
        m, u = model.load_state_dict(sd, strict=False)
        model.to(self.device)
        model.eval()
        if self.full_precision:
            print(
                '>> Using slower but more accurate full-precision math (--full_precision)'
            )
        else:
            print(
                '>> Using half precision math. Call with --full_precision to use more accurate but VRAM-intensive full precision.'
            )
            model.half()
        return model

    def _load_img(self, path, width, height, fit=False):
        assert os.path.exists(path), f'>> {path}: File not found'

        with Image.open(path) as img:
            image = img.convert('RGB')
        print(
            f'>> loaded input image of size {image.width}x{image.height} from {path}'
        )
        if fit:
            image = self._fit_image(image,(width,height))
        else:
            image = self._squeeze_image(image)

        size  = image.size
        image = np.array(image).astype(np.float32) / 255.0
        image = image[None].transpose(0, 3, 1, 2)
        image = torch.from_numpy(image)
        image = 2.0 * image - 1.0
        return image.to(self.device),size

    def _load_img_mask(self, path, width, height, fit=False, invert=False):
        assert os.path.exists(path), f'>> {path}: File not found'

        image = Image.open(path)
        print(
            f'>> loaded input mask of size {image.width}x{image.height} from {path}'
        )

        if fit:
            image = self._fit_image(image,(width,height))
        else:
            image = self._squeeze_image(image)

        # convert into a black/white mask
        image = self._mask_to_image(image,invert)
        image = image.convert('RGB')
        size  = image.size

        # not quite sure what's going on here. It is copied from basunjindal's implementation
        # image = image.resize((64, 64), resample=Image.Resampling.LANCZOS)
        # BUG: We need to use the model's downsample factor rather than hardcoding "8"
        from ldm.dream.generator.base import downsampling
        image = image.resize((size[0]//downsampling, size[1]//downsampling), resample=Image.Resampling.LANCZOS)
        image = np.array(image)
        image = image.astype(np.float32) / 255.0
        image = image[None].transpose(0, 3, 1, 2)
        image = torch.from_numpy(image)
        return image.to(self.device),size

    # The mask is expected to have the region to be inpainted
    # with alpha transparency. It converts it into a black/white
    # image with the transparent part black.
    def _mask_to_image(self, init_mask, invert=False) -> Image:
        if self._has_transparency(init_mask):
            # Obtain the mask from the transparency channel
            mask = Image.new(mode="L", size=init_mask.size, color=255)
            mask.putdata(init_mask.getdata(band=3))
            if invert:
                mask = ImageOps.invert(mask)
            return mask
        else:
            print(f'>> No transparent pixels in this image. Will paint across entire image.')
            return Image.new(mode="L", size=init_mask.size, color=0)

    def _has_transparency(self,image):
        if image.info.get("transparency", None) is not None:
            return True
        if image.mode == "P":
            transparent = image.info.get("transparency", -1)
            for _, index in image.getcolors():
                if index == transparent:
                    return True
        elif image.mode == "RGBA":
            extrema = image.getextrema()
            if extrema[3][0] < 255:
                return True
        return False

    def _squeeze_image(self,image):
        x,y,resize_needed = self._resolution_check(image.width,image.height)
        if resize_needed:
            return InitImageResizer(image).resize(x,y)
        return image


    def _fit_image(self,image,max_dimensions):
        w,h = max_dimensions
        print(
            f'>> image will be resized to fit inside a box {w}x{h} in size.'
        )
        if image.width > image.height:
            h = None    # by setting h to none, we tell InitImageResizer to fit into the width and calculate height
        elif image.height > image.width:
            w = None    # ditto for w
        else:
            pass
        image = InitImageResizer(image).resize(w,h)   # note that InitImageResizer does the multiple of 64 truncation internally
        print(
            f'>> after adjusting image dimensions to be multiples of 64, init image is {image.width}x{image.height}'
        )
        return image

    def _resolution_check(self, width, height, log=False):
        resize_needed = False
        w, h = map(
            lambda x: x - x % 64, (width, height)
        )  # resize to integer multiple of 64
        if h != height or w != width:
            if log:
                print(
                    f'>> Provided width and height must be multiples of 64. Auto-resizing to {w}x{h}'
                )
            height = h
            width  = w
            resize_needed = True

        if (width * height) > (self.width * self.height):
            print(">> This input is larger than your defaults. If you run out of memory, please use a smaller image.")

        return width, height, resize_needed
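A minimal sketch of driving prompt2image() with both callbacks, based on the docstring above; the prompt and output paths are illustrative, not part of the commit:

from ldm.generate import Generate

gr = Generate()
gr.load_model()

def on_step(sample, step):
    # sample is latent GPU data; decode it via gr.sample_to_image(sample) only if needed
    print(f'step {step}')

def on_image(image, seed, upscaled=False):
    image.save(f'outputs/astronaut-{seed}.png')

results = gr.prompt2image(
    prompt         = 'an astronaut riding a horse',
    steps          = 50,
    cfg_scale      = 7.5,
    step_callback  = on_step,
    image_callback = on_image,
)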
@@ -13,8 +13,8 @@ opt = arg_parser.parse_args()
 model_path = os.path.join(opt.gfpgan_dir, opt.gfpgan_model_path)
 gfpgan_model_exists = os.path.isfile(model_path)

-def _run_gfpgan(image, strength, prompt, seed, upsampler_scale=4):
-    print(f'>> GFPGAN - Restoring Faces: {prompt} : seed:{seed}')
+def run_gfpgan(image, strength, seed, upsampler_scale=4):
+    print(f'>> GFPGAN - Restoring Faces for image seed:{seed}')
     gfpgan = None
     with warnings.catch_warnings():
         warnings.filterwarnings('ignore', category=DeprecationWarning)
@@ -46,7 +46,7 @@ def _run_gfpgan(image, strength, prompt, seed, upsampler_scale=4):

     if gfpgan is None:
         print(
-            f'>> GFPGAN not initialized, it must be loaded via the --gfpgan argument'
+            f'>> GFPGAN not initialized. Their packages must be installed as siblings to the "stable-diffusion" folder, or set explicitly using the --gfpgan_dir option.'
         )
         return image

@@ -127,9 +127,9 @@ def _load_gfpgan_bg_upsampler(bg_upsampler, upsampler_scale, bg_tile=400):
     return bg_upsampler


-def real_esrgan_upscale(image, strength, upsampler_scale, prompt, seed):
+def real_esrgan_upscale(image, strength, upsampler_scale, seed):
     print(
-        f'>> Real-ESRGAN Upscaling: {prompt} : seed:{seed} : scale:{upsampler_scale}x'
+        f'>> Real-ESRGAN Upscaling seed:{seed} : scale:{upsampler_scale}x'
     )

     with warnings.catch_warnings():
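With the prompt argument removed, callers now pass only the image, a strength or scale, and the seed. An illustrative call site matching the new signatures, mirroring upscale_and_reconstruct() in ldm/generate.py above:

from ldm.gfpgan.gfpgan_tools import real_esrgan_upscale, run_gfpgan

image = real_esrgan_upscale(image, 0.75, 2, seed)   # strength 0.75, 2x upscale
image = run_gfpgan(image, 0.8, seed, 1)             # face restoration at strength 0.8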
@@ -171,6 +171,7 @@ class DDIMSampler(object):
         )
         return samples, intermediates

+    # This routine gets called from img2img
     @torch.no_grad()
     def ddim_sampling(
         self,
@@ -270,6 +271,7 @@ class DDIMSampler(object):

         return img, intermediates

+    # This routine gets called from ddim_sampling() and decode()
     @torch.no_grad()
     def p_sample_ddim(
         self,
@@ -372,14 +374,16 @@ class DDIMSampler(object):

     @torch.no_grad()
     def decode(
         self,
         x_latent,
         cond,
         t_start,
         img_callback=None,
         unconditional_guidance_scale=1.0,
         unconditional_conditioning=None,
         use_original_steps=False,
+        init_latent = None,
+        mask = None,
     ):

         timesteps = (
@@ -395,6 +399,8 @@ class DDIMSampler(object):

         iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
         x_dec = x_latent
+        x0 = init_latent
+
         for i, step in enumerate(iterator):
             index = total_steps - i - 1
             ts = torch.full(
@@ -403,6 +409,14 @@ class DDIMSampler(object):
                 device=x_latent.device,
                 dtype=torch.long,
             )
+
+            if mask is not None:
+                assert x0 is not None
+                xdec_orig = self.model.q_sample(
+                    x0, ts
+                )  # TODO: deterministic forward pass?
+                x_dec = xdec_orig * mask + (1.0 - mask) * x_dec
+
             x_dec, _ = self.p_sample_ddim(
                 x_dec,
                 cond,
@@ -412,6 +426,7 @@ class DDIMSampler(object):
                 unconditional_guidance_scale=unconditional_guidance_scale,
                 unconditional_conditioning=unconditional_conditioning,
             )
+
             if img_callback:
                 img_callback(x_dec, i)

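The new mask path in decode() pins masked-out regions to the original latent (re-noised to the current timestep) at every step, so only the unmasked region is actually inpainted. An illustrative restatement of the per-step blend added above:

# mask == 1 where the original image should be kept, 0 where inpainting happens
xdec_orig = model.q_sample(x0, ts)                  # original latent, noised to step ts
x_dec     = xdec_orig * mask + (1.0 - mask) * x_dec # keep original where masked, evolve elsewhere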
@ -13,7 +13,7 @@ def exists(val):
|
|||||||
|
|
||||||
|
|
||||||
def uniq(arr):
|
def uniq(arr):
|
||||||
return {el: True for el in arr}.keys()
|
return{el: True for el in arr}.keys()
|
||||||
|
|
||||||
|
|
||||||
def default(val, d):
|
def default(val, d):
|
||||||
@ -45,18 +45,19 @@ class GEGLU(nn.Module):
|
|||||||
|
|
||||||
|
|
||||||
class FeedForward(nn.Module):
|
class FeedForward(nn.Module):
|
||||||
def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0):
|
def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
inner_dim = int(dim * mult)
|
inner_dim = int(dim * mult)
|
||||||
dim_out = default(dim_out, dim)
|
dim_out = default(dim_out, dim)
|
||||||
project_in = (
|
project_in = nn.Sequential(
|
||||||
nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU())
|
nn.Linear(dim, inner_dim),
|
||||||
if not glu
|
nn.GELU()
|
||||||
else GEGLU(dim, inner_dim)
|
) if not glu else GEGLU(dim, inner_dim)
|
||||||
)
|
|
||||||
|
|
||||||
self.net = nn.Sequential(
|
self.net = nn.Sequential(
|
||||||
project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out)
|
project_in,
|
||||||
|
nn.Dropout(dropout),
|
||||||
|
nn.Linear(inner_dim, dim_out)
|
||||||
)
|
)
|
||||||
|
|
||||||
def forward(self, x):
|
def forward(self, x):
|
||||||
@ -73,9 +74,7 @@ def zero_module(module):
|
|||||||
|
|
||||||
|
|
||||||
def Normalize(in_channels):
|
def Normalize(in_channels):
|
||||||
return torch.nn.GroupNorm(
|
return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
|
||||||
num_groups=32, num_channels=in_channels, eps=1e-6, affine=True
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class LinearAttention(nn.Module):
|
class LinearAttention(nn.Module):
|
||||||
@ -83,28 +82,17 @@ class LinearAttention(nn.Module):
|
|||||||
super().__init__()
|
super().__init__()
|
||||||
self.heads = heads
|
self.heads = heads
|
||||||
hidden_dim = dim_head * heads
|
hidden_dim = dim_head * heads
|
||||||
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
|
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
|
||||||
self.to_out = nn.Conv2d(hidden_dim, dim, 1)
|
self.to_out = nn.Conv2d(hidden_dim, dim, 1)
|
||||||
|
|
||||||
def forward(self, x):
|
def forward(self, x):
|
||||||
b, c, h, w = x.shape
|
b, c, h, w = x.shape
|
||||||
qkv = self.to_qkv(x)
|
qkv = self.to_qkv(x)
|
||||||
q, k, v = rearrange(
|
q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3)
|
||||||
qkv,
|
k = k.softmax(dim=-1)
|
||||||
'b (qkv heads c) h w -> qkv b heads c (h w)',
|
|
||||||
heads=self.heads,
|
|
||||||
qkv=3,
|
|
||||||
)
|
|
||||||
k = k.softmax(dim=-1)
|
|
||||||
context = torch.einsum('bhdn,bhen->bhde', k, v)
|
context = torch.einsum('bhdn,bhen->bhde', k, v)
|
||||||
out = torch.einsum('bhde,bhdn->bhen', context, q)
|
out = torch.einsum('bhde,bhdn->bhen', context, q)
|
||||||
out = rearrange(
|
out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w)
|
||||||
out,
|
|
||||||
'b heads c (h w) -> b (heads c) h w',
|
|
||||||
heads=self.heads,
|
|
||||||
h=h,
|
|
||||||
w=w,
|
|
||||||
)
|
|
||||||
return self.to_out(out)
|
return self.to_out(out)
|
||||||
|
|
||||||
|
|
||||||
@ -114,18 +102,26 @@ class SpatialSelfAttention(nn.Module):
|
|||||||
self.in_channels = in_channels
|
self.in_channels = in_channels
|
||||||
|
|
||||||
self.norm = Normalize(in_channels)
|
self.norm = Normalize(in_channels)
|
||||||
self.q = torch.nn.Conv2d(
|
self.q = torch.nn.Conv2d(in_channels,
|
||||||
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
in_channels,
|
||||||
)
|
kernel_size=1,
|
||||||
self.k = torch.nn.Conv2d(
|
stride=1,
|
||||||
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
padding=0)
|
||||||
)
|
self.k = torch.nn.Conv2d(in_channels,
|
||||||
self.v = torch.nn.Conv2d(
|
in_channels,
|
||||||
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
kernel_size=1,
|
||||||
)
|
stride=1,
|
||||||
self.proj_out = torch.nn.Conv2d(
|
padding=0)
|
||||||
in_channels, in_channels, kernel_size=1, stride=1, padding=0
|
self.v = torch.nn.Conv2d(in_channels,
|
||||||
)
|
in_channels,
|
||||||
|
kernel_size=1,
|
||||||
|
stride=1,
|
||||||
|
padding=0)
|
||||||
|
self.proj_out = torch.nn.Conv2d(in_channels,
|
||||||
|
in_channels,
|
||||||
|
kernel_size=1,
|
||||||
|
stride=1,
|
||||||
|
padding=0)
|
||||||
|
|
||||||
def forward(self, x):
|
def forward(self, x):
|
||||||
h_ = x
|
h_ = x
|
||||||
@ -135,12 +131,12 @@ class SpatialSelfAttention(nn.Module):
|
|||||||
v = self.v(h_)
|
v = self.v(h_)
|
||||||
|
|
||||||
# compute attention
|
# compute attention
|
||||||
b, c, h, w = q.shape
|
b,c,h,w = q.shape
|
||||||
q = rearrange(q, 'b c h w -> b (h w) c')
|
q = rearrange(q, 'b c h w -> b (h w) c')
|
||||||
k = rearrange(k, 'b c h w -> b c (h w)')
|
k = rearrange(k, 'b c h w -> b c (h w)')
|
||||||
w_ = torch.einsum('bij,bjk->bik', q, k)
|
w_ = torch.einsum('bij,bjk->bik', q, k)
|
||||||
|
|
||||||
w_ = w_ * (int(c) ** (-0.5))
|
w_ = w_ * (int(c)**(-0.5))
|
||||||
w_ = torch.nn.functional.softmax(w_, dim=2)
|
w_ = torch.nn.functional.softmax(w_, dim=2)
|
||||||
|
|
||||||
# attend to values
|
# attend to values
|
||||||
@ -150,18 +146,16 @@ class SpatialSelfAttention(nn.Module):
|
|||||||
h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
|
h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
|
||||||
h_ = self.proj_out(h_)
|
h_ = self.proj_out(h_)
|
||||||
|
|
||||||
return x + h_
|
return x+h_
|
||||||
|
|
||||||
|
|
||||||
class CrossAttention(nn.Module):
|
class CrossAttention(nn.Module):
|
||||||
def __init__(
|
def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):
|
||||||
self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0
|
|
||||||
):
|
|
||||||
super().__init__()
|
super().__init__()
|
||||||
inner_dim = dim_head * heads
|
inner_dim = dim_head * heads
|
||||||
context_dim = default(context_dim, query_dim)
|
context_dim = default(context_dim, query_dim)
|
||||||
|
|
||||||
self.scale = dim_head**-0.5
|
self.scale = dim_head ** -0.5
|
||||||
self.heads = heads
|
self.heads = heads
|
||||||
|
|
||||||
self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
|
self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
|
||||||
@ -169,7 +163,8 @@ class CrossAttention(nn.Module):
|
|||||||
self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
|
self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
|
||||||
|
|
||||||
self.to_out = nn.Sequential(
|
self.to_out = nn.Sequential(
|
||||||
nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)
|
nn.Linear(inner_dim, query_dim),
|
||||||
|
nn.Dropout(dropout)
|
||||||
)
|
)
|
||||||
|
|
||||||
def forward(self, x, context=None, mask=None):
|
def forward(self, x, context=None, mask=None):
|
||||||
@@ -179,59 +174,43 @@ class CrossAttention(nn.Module):
         context = default(context, x)
         k = self.to_k(context)
         v = self.to_v(context)
+        del context, x

-        q, k, v = map(
-            lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)
-        )
+        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

-        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
+        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale   # (8, 4096, 40)
+        del q, k

         if exists(mask):
             mask = rearrange(mask, 'b ... -> b (...)')
             max_neg_value = -torch.finfo(sim.dtype).max
             mask = repeat(mask, 'b j -> (b h) () j', h=h)
             sim.masked_fill_(~mask, max_neg_value)
+            del mask

-        # attention, what we cannot get enough of
-        attn = sim.softmax(dim=-1)
+        # attention, what we cannot get enough of, by halves
+        sim[4:] = sim[4:].softmax(dim=-1)
+        sim[:4] = sim[:4].softmax(dim=-1)

-        out = einsum('b i j, b j d -> b i d', attn, v)
-        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
-        return self.to_out(out)
+        sim = einsum('b i j, b j d -> b i d', sim, v)
+        sim = rearrange(sim, '(b h) n d -> b n (h d)', h=h)
+        return self.to_out(sim)


 class BasicTransformerBlock(nn.Module):
-    def __init__(
-        self,
-        dim,
-        n_heads,
-        d_head,
-        dropout=0.0,
-        context_dim=None,
-        gated_ff=True,
-        checkpoint=True,
-    ):
+    def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True):
         super().__init__()
-        self.attn1 = CrossAttention(
-            query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout
-        )  # is a self-attention
+        self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout)  # is a self-attention
         self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
-        self.attn2 = CrossAttention(
-            query_dim=dim,
-            context_dim=context_dim,
-            heads=n_heads,
-            dim_head=d_head,
-            dropout=dropout,
-        )  # is self-attn if context is none
+        self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim,
+                                    heads=n_heads, dim_head=d_head, dropout=dropout)  # is self-attn if context is none
         self.norm1 = nn.LayerNorm(dim)
         self.norm2 = nn.LayerNorm(dim)
         self.norm3 = nn.LayerNorm(dim)
         self.checkpoint = checkpoint

     def forward(self, x, context=None):
-        return checkpoint(
-            self._forward, (x, context), self.parameters(), self.checkpoint
-        )
+        return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)

     def _forward(self, x, context=None):
         x = x.contiguous() if x.device.type == 'mps' else x
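The "by halves" change above frees intermediates eagerly (`del`) and applies the attention softmax in-place over two slices of the batch-of-heads dimension instead of allocating a second full-size tensor. The committed code hardcodes a split at index 4; a standalone, generalized sketch of the same idea (illustrative, not the committed code):

import torch

def softmax_in_halves(sim):
    # softmax over the last dim, done in-place in two chunks along dim 0
    # to reduce peak memory versus a single `attn = sim.softmax(dim=-1)`
    half = sim.shape[0] // 2
    sim[half:] = sim[half:].softmax(dim=-1)
    sim[:half] = sim[:half].softmax(dim=-1)
    return sim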
|
||||||
@ -249,43 +228,29 @@ class SpatialTransformer(nn.Module):
|
|||||||
Then apply standard transformer action.
|
Then apply standard transformer action.
|
||||||
Finally, reshape to image
|
Finally, reshape to image
|
||||||
"""
|
"""
|
||||||
|
def __init__(self, in_channels, n_heads, d_head,
|
||||||
def __init__(
|
depth=1, dropout=0., context_dim=None):
|
||||||
self,
|
|
||||||
in_channels,
|
|
||||||
n_heads,
|
|
||||||
d_head,
|
|
||||||
depth=1,
|
|
||||||
dropout=0.0,
|
|
||||||
context_dim=None,
|
|
||||||
):
|
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self.in_channels = in_channels
|
self.in_channels = in_channels
|
||||||
inner_dim = n_heads * d_head
|
inner_dim = n_heads * d_head
|
||||||
self.norm = Normalize(in_channels)
|
self.norm = Normalize(in_channels)
|
||||||
|
|
||||||
self.proj_in = nn.Conv2d(
|
self.proj_in = nn.Conv2d(in_channels,
|
||||||
in_channels, inner_dim, kernel_size=1, stride=1, padding=0
|
inner_dim,
|
||||||
)
|
kernel_size=1,
|
||||||
|
stride=1,
|
||||||
|
padding=0)
|
||||||
|
|
||||||
self.transformer_blocks = nn.ModuleList(
|
self.transformer_blocks = nn.ModuleList(
|
||||||
[
|
[BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim)
|
||||||
BasicTransformerBlock(
|
for d in range(depth)]
|
||||||
inner_dim,
|
|
||||||
n_heads,
|
|
||||||
d_head,
|
|
||||||
dropout=dropout,
|
|
||||||
context_dim=context_dim,
|
|
||||||
)
|
|
||||||
for d in range(depth)
|
|
||||||
]
|
|
||||||
)
|
)
|
||||||
|
|
||||||
self.proj_out = zero_module(
|
self.proj_out = zero_module(nn.Conv2d(inner_dim,
|
||||||
nn.Conv2d(
|
in_channels,
|
||||||
inner_dim, in_channels, kernel_size=1, stride=1, padding=0
|
kernel_size=1,
|
||||||
)
|
stride=1,
|
||||||
)
|
padding=0))
|
||||||
|
|
||||||
def forward(self, x, context=None):
|
def forward(self, x, context=None):
|
||||||
# note: if no context is given, cross-attention defaults to self-attention
|
# note: if no context is given, cross-attention defaults to self-attention
|
||||||
|
849  ldm/simplet2i.py
@@ -1,842 +1,13 @@
+'''
+This module is provided for backward compatibility with the
+original (hasty) API.
+
+Please use ldm.generate instead.
+'''
+
+from ldm.generate import Generate
+
+class T2I(Generate):
+    def __init__(self,**kwargs):
+        print(f'>> The ldm.simplet2i module is deprecated. Use ldm.generate instead. It is a drop-in replacement.')
+        super().__init__(kwargs)
-# Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)
-
-# Derived from source code carrying the following copyrights
-# Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich
-# Copyright (c) 2022 Robin Rombach and Patrick Esser and contributors
-
-import torch
-import numpy as np
-import random
-import os
-import traceback
-from ldm.modules.diffusionmodules.util import noise_like
-from omegaconf import OmegaConf
-from PIL import Image
-from tqdm import tqdm, trange
-from itertools import islice
-from einops import rearrange, repeat
-from torchvision.utils import make_grid
-from pytorch_lightning import seed_everything
-from torch import autocast
-from contextlib import contextmanager, nullcontext
-import transformers
-import time
-import re
-import sys
-
-from ldm.util import instantiate_from_config, rand_perlin_2d
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.models.diffusion.plms import PLMSSampler
-from ldm.models.diffusion.ksampler import KSampler
from ldm.dream.pngwriter import PngWriter
|
|
||||||
from ldm.dream.image_util import InitImageResizer
|
|
||||||
from ldm.dream.devices import choose_autocast_device, choose_torch_device
|
|
||||||
|
|
||||||
"""Simplified text to image API for stable diffusion/latent diffusion
|
|
||||||
|
|
||||||
Example Usage:
|
|
||||||
|
|
||||||
from ldm.simplet2i import T2I
|
|
||||||
|
|
||||||
# Create an object with default values
|
|
||||||
t2i = T2I(model = <path> // models/ldm/stable-diffusion-v1/model.ckpt
|
|
||||||
config = <path> // configs/stable-diffusion/v1-inference.yaml
|
|
||||||
iterations = <integer> // how many times to run the sampling (1)
|
|
||||||
steps = <integer> // 50
|
|
||||||
seed = <integer> // current system time
|
|
||||||
sampler_name= ['ddim', 'k_dpm_2_a', 'k_dpm_2', 'k_euler_a', 'k_euler', 'k_heun', 'k_lms', 'plms'] // k_lms
|
|
||||||
grid = <boolean> // false
|
|
||||||
width = <integer> // image width, multiple of 64 (512)
|
|
||||||
height = <integer> // image height, multiple of 64 (512)
|
|
||||||
cfg_scale = <float> // unconditional guidance scale (7.5)
|
|
||||||
)
|
|
||||||
|
|
||||||
# do the slow model initialization
|
|
||||||
t2i.load_model()
|
|
||||||
|
|
||||||
# Do the fast inference & image generation. Any options passed here
|
|
||||||
# override the default values assigned during class initialization
|
|
||||||
# Will call load_model() if the model was not previously loaded and so
|
|
||||||
# may be slow at first.
|
|
||||||
# The method returns a list of images. Each row of the list is a sub-list of [filename,seed]
|
|
||||||
results = t2i.prompt2png(prompt = "an astronaut riding a horse",
|
|
||||||
outdir = "./outputs/samples",
|
|
||||||
iterations = 3)
|
|
||||||
|
|
||||||
for row in results:
|
|
||||||
print(f'filename={row[0]}')
|
|
||||||
print(f'seed ={row[1]}')
|
|
||||||
|
|
||||||
# Same thing, but using an initial image.
|
|
||||||
results = t2i.prompt2png(prompt = "an astronaut riding a horse",
|
|
||||||
outdir = "./outputs/,
|
|
||||||
iterations = 3,
|
|
||||||
init_img = "./sketches/horse+rider.png")
|
|
||||||
|
|
||||||
for row in results:
|
|
||||||
print(f'filename={row[0]}')
|
|
||||||
print(f'seed ={row[1]}')
|
|
||||||
|
|
||||||
# Same thing, but we return a series of Image objects, which lets you manipulate them,
|
|
||||||
# combine them, and save them under arbitrary names
|
|
||||||
|
|
||||||
results = t2i.prompt2image(prompt = "an astronaut riding a horse"
|
|
||||||
outdir = "./outputs/")
|
|
||||||
for row in results:
|
|
||||||
im = row[0]
|
|
||||||
seed = row[1]
|
|
||||||
im.save(f'./outputs/samples/an_astronaut_riding_a_horse-{seed}.png')
|
|
||||||
im.thumbnail(100,100).save('./outputs/samples/astronaut_thumb.jpg')
|
|
||||||
|
|
||||||
Note that the old txt2img() and img2img() calls are deprecated but will
|
|
||||||
still work.
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
class T2I:
|
|
||||||
"""T2I class
|
|
||||||
Attributes
|
|
||||||
----------
|
|
||||||
model
|
|
||||||
config
|
|
||||||
iterations
|
|
||||||
steps
|
|
||||||
seed
|
|
||||||
sampler_name
|
|
||||||
width
|
|
||||||
height
|
|
||||||
cfg_scale
|
|
||||||
latent_channels
|
|
||||||
downsampling_factor
|
|
||||||
precision
|
|
||||||
strength
|
|
||||||
embedding_path
|
|
||||||
|
|
||||||
The vast majority of these arguments default to reasonable values.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
iterations=1,
|
|
||||||
steps=50,
|
|
||||||
seed=None,
|
|
||||||
cfg_scale=7.5,
|
|
||||||
weights='models/ldm/stable-diffusion-v1/model.ckpt',
|
|
||||||
config='configs/stable-diffusion/v1-inference.yaml',
|
|
||||||
grid=False,
|
|
||||||
width=512,
|
|
||||||
height=512,
|
|
||||||
sampler_name='k_lms',
|
|
||||||
latent_channels=4,
|
|
||||||
downsampling_factor=8,
|
|
||||||
ddim_eta=0.0, # deterministic
|
|
||||||
precision='autocast',
|
|
||||||
full_precision=False,
|
|
||||||
strength=0.75, # default in scripts/img2img.py
|
|
||||||
embedding_path=None,
|
|
||||||
device_type = 'cuda',
|
|
||||||
# just to keep track of this parameter when regenerating prompt
|
|
||||||
# needs to be replaced when new configuration system implemented.
|
|
||||||
latent_diffusion_weights=False,
|
|
||||||
):
|
|
||||||
self.iterations = iterations
|
|
||||||
self.width = width
|
|
||||||
self.height = height
|
|
||||||
self.steps = steps
|
|
||||||
self.cfg_scale = cfg_scale
|
|
||||||
self.weights = weights
|
|
||||||
self.config = config
|
|
||||||
self.sampler_name = sampler_name
|
|
||||||
self.latent_channels = latent_channels
|
|
||||||
self.downsampling_factor = downsampling_factor
|
|
||||||
self.grid = grid
|
|
||||||
self.ddim_eta = ddim_eta
|
|
||||||
self.precision = precision
|
|
||||||
self.full_precision = full_precision
|
|
||||||
self.strength = strength
|
|
||||||
self.embedding_path = embedding_path
|
|
||||||
self.device_type = device_type
|
|
||||||
self.model = None # empty for now
|
|
||||||
self.sampler = None
|
|
||||||
self.device = None
|
|
||||||
self.latent_diffusion_weights = latent_diffusion_weights
|
|
||||||
|
|
||||||
if device_type == 'cuda' and not torch.cuda.is_available():
|
|
||||||
device_type = choose_torch_device()
|
|
||||||
print(">> cuda not available, using device", device_type)
|
|
||||||
self.device = torch.device(device_type)
|
|
||||||
|
|
||||||
# for VRAM usage statistics
|
|
||||||
device_type = choose_torch_device()
|
|
||||||
self.session_peakmem = torch.cuda.max_memory_allocated() if device_type == 'cuda' else None
|
|
||||||
|
|
||||||
if seed is None:
|
|
||||||
self.seed = self._new_seed()
|
|
||||||
else:
|
|
||||||
self.seed = seed
|
|
||||||
transformers.logging.set_verbosity_error()
|
|
||||||
|
|
||||||
def prompt2png(self, prompt, outdir, **kwargs):
|
|
||||||
"""
|
|
||||||
Takes a prompt and an output directory, writes out the requested number
|
|
||||||
of PNG files, and returns an array of [[filename,seed],[filename,seed]...]
|
|
||||||
Optional named arguments are the same as those passed to T2I and prompt2image()
|
|
||||||
"""
|
|
||||||
results = self.prompt2image(prompt, **kwargs)
|
|
||||||
pngwriter = PngWriter(outdir)
|
|
||||||
prefix = pngwriter.unique_prefix()
|
|
||||||
outputs = []
|
|
||||||
for image, seed in results:
|
|
||||||
name = f'{prefix}.{seed}.png'
|
|
||||||
path = pngwriter.save_image_and_prompt_to_png(
|
|
||||||
image, f'{prompt} -S{seed}', name)
|
|
||||||
outputs.append([path, seed])
|
|
||||||
return outputs
|
|
||||||
|
|
||||||
def txt2img(self, prompt, **kwargs):
|
|
||||||
outdir = kwargs.pop('outdir', 'outputs/img-samples')
|
|
||||||
return self.prompt2png(prompt, outdir, **kwargs)
|
|
||||||
|
|
||||||
def img2img(self, prompt, **kwargs):
|
|
||||||
outdir = kwargs.pop('outdir', 'outputs/img-samples')
|
|
||||||
assert (
|
|
||||||
'init_img' in kwargs
|
|
||||||
), 'call to img2img() must include the init_img argument'
|
|
||||||
return self.prompt2png(prompt, outdir, **kwargs)
|
|
||||||
|
|
||||||
def prompt2image(
|
|
||||||
self,
|
|
||||||
# these are common
|
|
||||||
prompt,
|
|
||||||
iterations = None,
|
|
||||||
steps = None,
|
|
||||||
seed = None,
|
|
||||||
cfg_scale = None,
|
|
||||||
ddim_eta = None,
|
|
||||||
skip_normalize = False,
|
|
||||||
image_callback = None,
|
|
||||||
step_callback = None,
|
|
||||||
width = None,
|
|
||||||
height = None,
|
|
||||||
# these are specific to img2img
|
|
||||||
init_img = None,
|
|
||||||
fit = False,
|
|
||||||
strength = None,
|
|
||||||
gfpgan_strength= 0,
|
|
||||||
save_original = False,
|
|
||||||
upscale = None,
|
|
||||||
sampler_name = None,
|
|
||||||
log_tokenization= False,
|
|
||||||
with_variations = None,
|
|
||||||
variation_amount = 0.0,
|
|
||||||
threshold = 0,
|
|
||||||
perlin = 0,
|
|
||||||
**args,
|
|
||||||
): # eat up additional cruft
|
|
||||||
"""
|
|
||||||
ldm.prompt2image() is the common entry point for txt2img() and img2img()
|
|
||||||
It takes the following arguments:
|
|
||||||
prompt // prompt string (no default)
|
|
||||||
iterations // iterations (1); image count=iterations
|
|
||||||
steps // refinement steps per iteration
|
|
||||||
seed // seed for random number generator
|
|
||||||
width // width of image, in multiples of 64 (512)
|
|
||||||
height // height of image, in multiples of 64 (512)
|
|
||||||
cfg_scale // how strongly the prompt influences the image (7.5) (must be >1)
|
|
||||||
init_img // path to an initial image - its dimensions override width and height
|
|
||||||
strength // strength for noising/unnoising init_img. 0.0 preserves image exactly, 1.0 replaces it completely
|
|
||||||
gfpgan_strength // strength for GFPGAN. 0.0 preserves image exactly, 1.0 replaces it completely
|
|
||||||
ddim_eta // image randomness (eta=0.0 means the same seed always produces the same image)
|
|
||||||
step_callback // a function or method that will be called each step
|
|
||||||
image_callback // a function or method that will be called each time an image is generated
|
|
||||||
with_variations // a weighted list [(seed_1, weight_1), (seed_2, weight_2), ...] of variations which should be applied before doing any generation
|
|
||||||
variation_amount // optional 0-1 value to slerp from -S noise to random noise (allows variations on an image)
|
|
||||||
|
|
||||||
To use the step callback, define a function that receives two arguments:
|
|
||||||
- Image GPU data
|
|
||||||
- The step number
|
|
||||||
|
|
||||||
To use the image callback, define a function or method that receives two arguments, an Image object
|
|
||||||
and the seed. You can then do whatever you like with the image, including converting it to
|
|
||||||
different formats and manipulating it. For example:
|
|
||||||
|
|
||||||
def process_image(image,seed):
|
|
||||||
image.save(f'images/{seed}.png')
|
|
||||||
|
|
||||||
The callback used by prompt2png() can be found in ldm/dream/pngwriter.py. It contains code
|
|
||||||
to create the requested output directory, select a unique informative name for each image, and
|
|
||||||
write the prompt into the PNG metadata.
|
|
||||||
"""
|
|
||||||
# TODO: convert this into a getattr() loop
|
|
||||||
steps = steps or self.steps
|
|
||||||
width = width or self.width
|
|
||||||
height = height or self.height
|
|
||||||
cfg_scale = cfg_scale or self.cfg_scale
|
|
||||||
ddim_eta = ddim_eta or self.ddim_eta
|
|
||||||
iterations = iterations or self.iterations
|
|
||||||
strength = strength or self.strength
|
|
||||||
self.log_tokenization = log_tokenization
|
|
||||||
with_variations = [] if with_variations is None else with_variations
|
|
||||||
|
|
||||||
model = (
|
|
||||||
self.load_model()
|
|
||||||
) # will instantiate the model or return it from cache
|
|
||||||
assert cfg_scale > 1.0, 'CFG_Scale (-C) must be >1.0'
|
|
||||||
assert (
|
|
||||||
0.0 <= strength <= 1.0
|
|
||||||
), 'can only work with strength in [0.0, 1.0]'
|
|
||||||
assert (
|
|
||||||
0.0 <= variation_amount <= 1.0
|
|
||||||
), '-v --variation_amount must be in [0.0, 1.0]'
|
|
||||||
|
|
||||||
if len(with_variations) > 0:
|
|
||||||
assert seed is not None,\
|
|
||||||
'seed must be specified when using with_variations'
|
|
||||||
if variation_amount == 0.0:
|
|
||||||
assert iterations == 1,\
|
|
||||||
'when using --with_variations, multiple iterations are only possible when using --variation_amount'
|
|
||||||
assert all(0 <= weight <= 1 for _, weight in with_variations),\
|
|
||||||
f'variation weights must be in [0.0, 1.0]: got {[weight for _, weight in with_variations]}'
|
|
||||||
|
|
||||||
seed = seed or self.seed
|
|
||||||
width, height, _ = self._resolution_check(width, height, log=True)
|
|
||||||
|
|
||||||
# TODO: - Check if this is still necessary to run on M1 devices.
|
|
||||||
# - Move code into ldm.dream.devices to live alongside other
|
|
||||||
# special-hardware casing code.
|
|
||||||
if self.precision == 'autocast' and torch.cuda.is_available():
|
|
||||||
scope = autocast
|
|
||||||
else:
|
|
||||||
scope = nullcontext
|
|
||||||
|
|
||||||
if sampler_name and (sampler_name != self.sampler_name):
|
|
||||||
self.sampler_name = sampler_name
|
|
||||||
self._set_sampler()
|
|
||||||
|
|
||||||
tic = time.time()
|
|
||||||
if torch.cuda.is_available():
|
|
||||||
torch.cuda.reset_peak_memory_stats()
|
|
||||||
results = list()
|
|
||||||
|
|
||||||
try:
|
|
||||||
if init_img:
|
|
||||||
assert os.path.exists(init_img), f'{init_img}: File not found'
|
|
||||||
init_image = self._load_img(init_img, width, height, fit).to(self.device)
|
|
||||||
with scope(self.device.type):
|
|
||||||
init_latent = self.model.get_first_stage_encoding(
|
|
||||||
self.model.encode_first_stage(init_image)
|
|
||||||
) # move to latent space
|
|
||||||
|
|
||||||
make_image = self._img2img(
|
|
||||||
prompt,
|
|
||||||
steps=steps,
|
|
||||||
cfg_scale=cfg_scale,
|
|
||||||
ddim_eta=ddim_eta,
|
|
||||||
skip_normalize=skip_normalize,
|
|
||||||
init_latent=init_latent,
|
|
||||||
strength=strength,
|
|
||||||
callback=step_callback,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
make_image = self._txt2img(
|
|
||||||
prompt,
|
|
||||||
steps=steps,
|
|
||||||
cfg_scale=cfg_scale,
|
|
||||||
ddim_eta=ddim_eta,
|
|
||||||
skip_normalize=skip_normalize,
|
|
||||||
width=width,
|
|
||||||
height=height,
|
|
||||||
callback=step_callback,
|
|
||||||
threshold=threshold,
|
|
||||||
perlin=perlin,
|
|
||||||
)
|
|
||||||
|
|
||||||
def get_noise():
|
|
||||||
if init_img:
|
|
||||||
x = torch.randn_like(init_latent, device=self.device)
|
|
||||||
else:
|
|
||||||
x = torch.randn([1,
|
|
||||||
self.latent_channels,
|
|
||||||
height // self.downsampling_factor,
|
|
||||||
width // self.downsampling_factor],
|
|
||||||
device=self.device)
|
|
||||||
if perlin > 0.0:
|
|
||||||
shape = x.shape
|
|
||||||
perlin_noise = torch.stack([rand_perlin_2d((shape[2], shape[3]), (8, 8)).to(self.device) for _ in range(shape[1])], dim=0)
|
|
||||||
x = (1 - perlin) * x + perlin * perlin_noise
|
|
||||||
return x
|
|
||||||
|
|
||||||
initial_noise = None
|
|
||||||
if variation_amount > 0 or len(with_variations) > 0:
|
|
||||||
# use fixed initial noise plus random noise per iteration
|
|
||||||
seed_everything(seed)
|
|
||||||
initial_noise = get_noise()
|
|
||||||
for v_seed, v_weight in with_variations:
|
|
||||||
seed = v_seed
|
|
||||||
seed_everything(seed)
|
|
||||||
next_noise = get_noise()
|
|
||||||
initial_noise = self.slerp(v_weight, initial_noise, next_noise)
|
|
||||||
if variation_amount > 0:
|
|
||||||
random.seed() # reset RNG to an actually random state, so we can get a random seed for variations
|
|
||||||
seed = random.randrange(0,np.iinfo(np.uint32).max)
|
|
||||||
|
|
||||||
device_type = choose_autocast_device(self.device)
|
|
||||||
with scope(device_type), self.model.ema_scope():
|
|
||||||
for n in trange(iterations, desc='Generating'):
|
|
||||||
x_T = None
|
|
||||||
if variation_amount > 0:
|
|
||||||
seed_everything(seed)
|
|
||||||
target_noise = get_noise()
|
|
||||||
x_T = self.slerp(variation_amount, initial_noise, target_noise)
|
|
||||||
elif initial_noise is not None:
|
|
||||||
# i.e. we specified particular variations
|
|
||||||
x_T = initial_noise
|
|
||||||
else:
|
|
||||||
seed_everything(seed)
|
|
||||||
x_T = get_noise()
|
|
||||||
image = make_image(x_T)
|
|
||||||
results.append([image, seed])
|
|
||||||
if image_callback is not None:
|
|
||||||
image_callback(image, seed)
|
|
||||||
seed = self._new_seed()
|
|
||||||
|
|
||||||
if upscale is not None or gfpgan_strength > 0:
|
|
||||||
for result in results:
|
|
||||||
image, seed = result
|
|
||||||
try:
|
|
||||||
if upscale is not None:
|
|
||||||
from ldm.gfpgan.gfpgan_tools import (
|
|
||||||
real_esrgan_upscale,
|
|
||||||
)
|
|
||||||
if len(upscale) < 2:
|
|
||||||
upscale.append(0.75)
|
|
||||||
image = real_esrgan_upscale(
|
|
||||||
image,
|
|
||||||
upscale[1],
|
|
||||||
int(upscale[0]),
|
|
||||||
prompt,
|
|
||||||
seed,
|
|
||||||
)
|
|
||||||
if gfpgan_strength > 0:
|
|
||||||
from ldm.gfpgan.gfpgan_tools import _run_gfpgan
|
|
||||||
|
|
||||||
image = _run_gfpgan(
|
|
||||||
image, gfpgan_strength, prompt, seed, 1
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
print(
|
|
||||||
f'>> Error running RealESRGAN - Your image was not upscaled.\n{e}'
|
|
||||||
)
|
|
||||||
if image_callback is not None:
|
|
||||||
if save_original:
|
|
||||||
image_callback(image, seed)
|
|
||||||
else:
|
|
||||||
image_callback(image, seed, upscaled=True)
|
|
||||||
else: # no callback passed, so we simply replace old image with rescaled one
|
|
||||||
result[0] = image
|
|
||||||
|
|
||||||
except KeyboardInterrupt:
|
|
||||||
print('*interrupted*')
|
|
||||||
print(
|
|
||||||
'>> Partial results will be returned; if --grid was requested, nothing will be returned.'
|
|
||||||
)
|
|
||||||
except RuntimeError as e:
|
|
||||||
print(traceback.format_exc(), file=sys.stderr)
|
|
||||||
print('>> Are you sure your system has an adequate NVIDIA GPU?')
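
For reference, the upscale argument handled in the post-processing loop above is a one- or two-element list; the values below are illustrative and the semantics are inferred from the branch that appends the 0.75 default.

upscale = [2]        # 2x ESRGAN upscale; second element defaults to 0.75
upscale = [4, 0.9]   # 4x upscale with strength 0.9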
|
|
||||||
|
|
||||||
toc = time.time()
|
|
||||||
print('>> Usage stats:')
|
|
||||||
print(
|
|
||||||
f'>> {len(results)} image(s) generated in', '%4.2fs' % (toc - tic)
|
|
||||||
)
|
|
||||||
print(
|
|
||||||
f'>> Max VRAM used for this generation:',
|
|
||||||
'%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9),
|
|
||||||
)
|
|
||||||
|
|
||||||
if self.session_peakmem:
|
|
||||||
self.session_peakmem = max(
|
|
||||||
self.session_peakmem, torch.cuda.max_memory_allocated()
|
|
||||||
)
|
|
||||||
print(
|
|
||||||
f'>> Max VRAM used since script start: ',
|
|
||||||
'%4.2fG' % (self.session_peakmem / 1e9),
|
|
||||||
)
|
|
||||||
return results
|
|
||||||
|
|
||||||
@torch.no_grad()
|
|
||||||
def _txt2img(
|
|
||||||
self,
|
|
||||||
prompt,
|
|
||||||
steps,
|
|
||||||
cfg_scale,
|
|
||||||
ddim_eta,
|
|
||||||
skip_normalize,
|
|
||||||
width,
|
|
||||||
height,
|
|
||||||
callback,
|
|
||||||
threshold,
|
|
||||||
perlin,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
Returns a function returning an image derived from the prompt
|
|
||||||
Return value depends on the seed at the time you call it
|
|
||||||
"""
|
|
||||||
|
|
||||||
sampler = self.sampler
|
|
||||||
|
|
||||||
def make_image(x_T):
|
|
||||||
uc, c = self._get_uc_and_c(prompt, skip_normalize)
|
|
||||||
shape = [
|
|
||||||
self.latent_channels,
|
|
||||||
height // self.downsampling_factor,
|
|
||||||
width // self.downsampling_factor,
|
|
||||||
]
|
|
||||||
samples, _ = sampler.sample(
|
|
||||||
batch_size=1,
|
|
||||||
S=steps,
|
|
||||||
x_T=x_T,
|
|
||||||
conditioning=c,
|
|
||||||
shape=shape,
|
|
||||||
verbose=False,
|
|
||||||
unconditional_guidance_scale=cfg_scale,
|
|
||||||
unconditional_conditioning=uc,
|
|
||||||
eta=ddim_eta,
|
|
||||||
img_callback=callback,
|
|
||||||
threshold=threshold,
|
|
||||||
perlin=perlin,
|
|
||||||
)
|
|
||||||
return self._sample_to_image(samples)
|
|
||||||
return make_image
|
|
||||||
|
|
||||||
@torch.no_grad()
|
|
||||||
def _img2img(
|
|
||||||
self,
|
|
||||||
prompt,
|
|
||||||
steps,
|
|
||||||
cfg_scale,
|
|
||||||
ddim_eta,
|
|
||||||
skip_normalize,
|
|
||||||
init_latent,
|
|
||||||
strength,
|
|
||||||
callback, # Currently not implemented for img2img
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
Returns a function returning an image derived from the prompt and the initial image
|
|
||||||
Return value depends on the seed at the time you call it
|
|
||||||
"""
|
|
||||||
|
|
||||||
# PLMS sampler not supported yet, so ignore previous sampler
|
|
||||||
if self.sampler_name != 'ddim':
|
|
||||||
print(
|
|
||||||
f">> sampler '{self.sampler_name}' is not yet supported. Using DDIM sampler"
|
|
||||||
)
|
|
||||||
sampler = DDIMSampler(self.model, device=self.device)
|
|
||||||
else:
|
|
||||||
sampler = self.sampler
|
|
||||||
|
|
||||||
sampler.make_schedule(
|
|
||||||
ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
|
|
||||||
)
|
|
||||||
|
|
||||||
t_enc = int(strength * steps)
|
|
||||||
|
|
||||||
def make_image(x_T):
|
|
||||||
uc, c = self._get_uc_and_c(prompt, skip_normalize)
|
|
||||||
|
|
||||||
# encode (scaled latent)
|
|
||||||
z_enc = sampler.stochastic_encode(
|
|
||||||
init_latent,
|
|
||||||
torch.tensor([t_enc]).to(self.device),
|
|
||||||
noise=x_T
|
|
||||||
)
|
|
||||||
# decode it
|
|
||||||
samples = sampler.decode(
|
|
||||||
z_enc,
|
|
||||||
c,
|
|
||||||
t_enc,
|
|
||||||
img_callback=callback,
|
|
||||||
unconditional_guidance_scale=cfg_scale,
|
|
||||||
unconditional_conditioning=uc,
|
|
||||||
)
|
|
||||||
return self._sample_to_image(samples)
|
|
||||||
return make_image
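
A quick worked example of the strength-to-steps mapping used above (numbers are illustrative):

steps, strength = 50, 0.75
t_enc = int(strength * steps)   # 37: the init latent is noised to step 37 and then denoised back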
|
|
||||||
|
|
||||||
# TODO: does this actually need to run every loop? does anything in it vary by random seed?
|
|
||||||
def _get_uc_and_c(self, prompt, skip_normalize):
|
|
||||||
|
|
||||||
uc = self.model.get_learned_conditioning([''])
|
|
||||||
|
|
||||||
# get weighted sub-prompts
|
|
||||||
weighted_subprompts = T2I._split_weighted_subprompts(
|
|
||||||
prompt, skip_normalize)
|
|
||||||
|
|
||||||
if len(weighted_subprompts) > 1:
|
|
||||||
# i dont know if this is correct.. but it works
|
|
||||||
c = torch.zeros_like(uc)
|
|
||||||
# normalize each "sub prompt" and add it
|
|
||||||
for subprompt, weight in weighted_subprompts:
|
|
||||||
self._log_tokenization(subprompt)
|
|
||||||
c = torch.add(
|
|
||||||
c,
|
|
||||||
self.model.get_learned_conditioning([subprompt]),
|
|
||||||
alpha=weight,
|
|
||||||
)
|
|
||||||
else: # just standard 1 prompt
|
|
||||||
self._log_tokenization(prompt)
|
|
||||||
c = self.model.get_learned_conditioning([prompt])
|
|
||||||
return (uc, c)
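
A standalone sketch of the weighted accumulation above: torch.add with alpha scales the tensor being added, so c ends up as a weight-blended sum of the sub-prompt embeddings. The toy vectors stand in for real conditioning tensors.

import torch

c = torch.zeros(2)
embeddings = [(torch.tensor([1.0, 0.0]), 0.75), (torch.tensor([0.0, 1.0]), 0.25)]
for emb, weight in embeddings:
    c = torch.add(c, emb, alpha=weight)
print(c)   # tensor([0.7500, 0.2500])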
|
|
||||||
|
|
||||||
def _sample_to_image(self, samples):
|
|
||||||
x_samples = self.model.decode_first_stage(samples)
|
|
||||||
x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
|
|
||||||
if len(x_samples) != 1:
|
|
||||||
raise Exception(
|
|
||||||
f'>> expected to get a single image, but got {len(x_samples)}')
|
|
||||||
x_sample = 255.0 * rearrange(
|
|
||||||
x_samples[0].cpu().numpy(), 'c h w -> h w c'
|
|
||||||
)
|
|
||||||
return Image.fromarray(x_sample.astype(np.uint8))
|
|
||||||
|
|
||||||
def _new_seed(self):
|
|
||||||
self.seed = random.randrange(0, np.iinfo(np.uint32).max)
|
|
||||||
return self.seed
|
|
||||||
|
|
||||||
def load_model(self):
|
|
||||||
"""Load and initialize the model from configuration variables passed at object creation time"""
|
|
||||||
if self.model is None:
|
|
||||||
seed_everything(self.seed)
|
|
||||||
try:
|
|
||||||
config = OmegaConf.load(self.config)
|
|
||||||
model = self._load_model_from_config(config, self.weights)
|
|
||||||
if self.embedding_path is not None:
|
|
||||||
model.embedding_manager.load(
|
|
||||||
self.embedding_path, self.full_precision
|
|
||||||
)
|
|
||||||
self.model = model.to(self.device)
|
|
||||||
# model.to doesn't change the cond_stage_model.device used to move the tokenizer output, so set it here
|
|
||||||
self.model.cond_stage_model.device = self.device
|
|
||||||
except AttributeError as e:
|
|
||||||
print(f'>> Error loading model. {str(e)}', file=sys.stderr)
|
|
||||||
print(traceback.format_exc(), file=sys.stderr)
|
|
||||||
raise SystemExit from e
|
|
||||||
|
|
||||||
self._set_sampler()
|
|
||||||
|
|
||||||
return self.model
|
|
||||||
|
|
||||||
def _set_sampler(self):
|
|
||||||
msg = f'>> Setting Sampler to {self.sampler_name}'
|
|
||||||
if self.sampler_name == 'plms':
|
|
||||||
self.sampler = PLMSSampler(self.model, device=self.device)
|
|
||||||
elif self.sampler_name == 'ddim':
|
|
||||||
self.sampler = DDIMSampler(self.model, device=self.device)
|
|
||||||
elif self.sampler_name == 'k_dpm_2_a':
|
|
||||||
self.sampler = KSampler(
|
|
||||||
self.model, 'dpm_2_ancestral', device=self.device
|
|
||||||
)
|
|
||||||
elif self.sampler_name == 'k_dpm_2':
|
|
||||||
self.sampler = KSampler(self.model, 'dpm_2', device=self.device)
|
|
||||||
elif self.sampler_name == 'k_euler_a':
|
|
||||||
self.sampler = KSampler(
|
|
||||||
self.model, 'euler_ancestral', device=self.device
|
|
||||||
)
|
|
||||||
elif self.sampler_name == 'k_euler':
|
|
||||||
self.sampler = KSampler(self.model, 'euler', device=self.device)
|
|
||||||
elif self.sampler_name == 'k_heun':
|
|
||||||
self.sampler = KSampler(self.model, 'heun', device=self.device)
|
|
||||||
elif self.sampler_name == 'k_lms':
|
|
||||||
self.sampler = KSampler(self.model, 'lms', device=self.device)
|
|
||||||
else:
|
|
||||||
msg = f'>> Unsupported Sampler: {self.sampler_name}, Defaulting to plms'
|
|
||||||
self.sampler = PLMSSampler(self.model, device=self.device)
|
|
||||||
|
|
||||||
print(msg)
|
|
||||||
|
|
||||||
def _load_model_from_config(self, config, ckpt):
|
|
||||||
print(f'>> Loading model from {ckpt}')
|
|
||||||
pl_sd = torch.load(ckpt, map_location='cpu')
|
|
||||||
# if "global_step" in pl_sd:
|
|
||||||
# print(f"Global Step: {pl_sd['global_step']}")
|
|
||||||
sd = pl_sd['state_dict']
|
|
||||||
model = instantiate_from_config(config.model)
|
|
||||||
m, u = model.load_state_dict(sd, strict=False)
|
|
||||||
model.to(self.device)
|
|
||||||
model.eval()
|
|
||||||
if self.full_precision:
|
|
||||||
print(
|
|
||||||
'Using slower but more accurate full-precision math (--full_precision)'
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
print(
|
|
||||||
'>> Using half precision math. Call with --full_precision to use more accurate but VRAM-intensive full precision.'
|
|
||||||
)
|
|
||||||
model.half()
|
|
||||||
return model
|
|
||||||
|
|
||||||
def _load_img(self, path, width, height, fit=False):
|
|
||||||
with Image.open(path) as img:
|
|
||||||
image = img.convert('RGB')
|
|
||||||
print(
|
|
||||||
f'>> loaded input image of size {image.width}x{image.height} from {path}'
|
|
||||||
)
|
|
||||||
|
|
||||||
# The logic here is:
|
|
||||||
# 1. If "fit" is true, then the image will be fit into the bounding box defined
|
|
||||||
# by width and height. It will do this in a way that preserves the init image's
|
|
||||||
# aspect ratio while preventing letterboxing. This means that if there is
|
|
||||||
# leftover horizontal space after rescaling the image to fit in the bounding box,
|
|
||||||
# the generated image's width will be reduced to the rescaled init image's width.
|
|
||||||
# Similarly for the vertical space.
|
|
||||||
# 2. Otherwise, if "fit" is false, then the image will be scaled, preserving its
|
|
||||||
# aspect ratio, to the nearest multiple of 64. Large images may generate an
|
|
||||||
# unexpected OOM error.
|
|
||||||
if fit:
|
|
||||||
image = self._fit_image(image,(width,height))
|
|
||||||
else:
|
|
||||||
image = self._squeeze_image(image)
|
|
||||||
image = np.array(image).astype(np.float32) / 255.0
|
|
||||||
image = image[None].transpose(0, 3, 1, 2)
|
|
||||||
image = torch.from_numpy(image)
|
|
||||||
return 2.0 * image - 1.0
|
|
||||||
|
|
||||||
def _squeeze_image(self,image):
|
|
||||||
x,y,resize_needed = self._resolution_check(image.width,image.height)
|
|
||||||
if resize_needed:
|
|
||||||
return InitImageResizer(image).resize(x,y)
|
|
||||||
return image
|
|
||||||
|
|
||||||
|
|
||||||
def _fit_image(self,image,max_dimensions):
|
|
||||||
w,h = max_dimensions
|
|
||||||
print(
|
|
||||||
f'>> image will be resized to fit inside a box {w}x{h} in size.'
|
|
||||||
)
|
|
||||||
if image.width > image.height:
|
|
||||||
h = None # by setting h to none, we tell InitImageResizer to fit into the width and calculate height
|
|
||||||
elif image.height > image.width:
|
|
||||||
w = None # ditto for w
|
|
||||||
else:
|
|
||||||
pass
|
|
||||||
image = InitImageResizer(image).resize(w,h) # note that InitImageResizer does the multiple of 64 truncation internally
|
|
||||||
print(
|
|
||||||
f'>> after adjusting image dimensions to be multiples of 64, init image is {image.width}x{image.height}'
|
|
||||||
)
|
|
||||||
return image
|
|
||||||
|
|
||||||
|
|
||||||
# TO DO: Move this and related weighted subprompt code into its own module.
|
|
||||||
def _split_weighted_subprompts(text, skip_normalize=False):
|
|
||||||
"""
|
|
||||||
grabs all text up to the first occurrence of ':'
|
|
||||||
uses the grabbed text as a sub-prompt, and takes the value following ':' as weight
|
|
||||||
if ':' has no value defined, defaults to 1.0
|
|
||||||
repeats until no text remaining
|
|
||||||
"""
|
|
||||||
prompt_parser = re.compile("""
|
|
||||||
(?P<prompt> # capture group for 'prompt'
|
|
||||||
(?:\\\:|[^:])+ # match one or more non ':' characters or escaped colons '\:'
|
|
||||||
) # end 'prompt'
|
|
||||||
(?: # non-capture group
|
|
||||||
:+ # match one or more ':' characters
|
|
||||||
(?P<weight> # capture group for 'weight'
|
|
||||||
-?\d+(?:\.\d+)? # match positive or negative integer or decimal number
|
|
||||||
)? # end weight capture group, make optional
|
|
||||||
\s* # strip spaces after weight
|
|
||||||
| # OR
|
|
||||||
$ # else, if no ':' then match end of line
|
|
||||||
) # end non-capture group
|
|
||||||
""", re.VERBOSE)
|
|
||||||
parsed_prompts = [(match.group("prompt").replace("\\:", ":"), float(
|
|
||||||
match.group("weight") or 1)) for match in re.finditer(prompt_parser, text)]
|
|
||||||
if skip_normalize:
|
|
||||||
return parsed_prompts
|
|
||||||
weight_sum = sum(map(lambda x: x[1], parsed_prompts))
|
|
||||||
if weight_sum == 0:
|
|
||||||
print(
|
|
||||||
"Warning: Subprompt weights add up to zero. Discarding and using even weights instead.")
|
|
||||||
equal_weight = 1 / len(parsed_prompts)
|
|
||||||
return [(x[0], equal_weight) for x in parsed_prompts]
|
|
||||||
return [(x[0], x[1] / weight_sum) for x in parsed_prompts]
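
For example, with normalization enabled the weights are rescaled to sum to one (hand-checked against the regular expression above; the prompt text is illustrative):

T2I._split_weighted_subprompts('a castle:2 fog')
# -> [('a castle', 0.666...), ('fog', 0.333...)]
T2I._split_weighted_subprompts('a castle:2 fog', skip_normalize=True)
# -> [('a castle', 2.0), ('fog', 1.0)]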
|
|
||||||
|
|
||||||
# shows how the prompt is tokenized
|
|
||||||
# usually tokens have '</w>' to indicate end-of-word,
|
|
||||||
# but for readability it has been replaced with ' '
|
|
||||||
def _log_tokenization(self, text):
|
|
||||||
if not self.log_tokenization:
|
|
||||||
return
|
|
||||||
tokens = self.model.cond_stage_model.tokenizer._tokenize(text)
|
|
||||||
tokenized = ""
|
|
||||||
discarded = ""
|
|
||||||
usedTokens = 0
|
|
||||||
totalTokens = len(tokens)
|
|
||||||
for i in range(0, totalTokens):
|
|
||||||
token = tokens[i].replace('</w>', ' ')
|
|
||||||
# alternate color
|
|
||||||
s = (usedTokens % 6) + 1
|
|
||||||
if i < self.model.cond_stage_model.max_length:
|
|
||||||
tokenized = tokenized + f"\x1b[0;3{s};40m{token}"
|
|
||||||
usedTokens += 1
|
|
||||||
else: # over max token length
|
|
||||||
discarded = discarded + f"\x1b[0;3{s};40m{token}"
|
|
||||||
print(f"\nTokens ({usedTokens}):\n{tokenized}\x1b[0m")
|
|
||||||
if discarded != "":
|
|
||||||
print(
|
|
||||||
f"Tokens Discarded ({totalTokens-usedTokens}):\n{discarded}\x1b[0m")
|
|
||||||
|
|
||||||
def _resolution_check(self, width, height, log=False):
|
|
||||||
resize_needed = False
|
|
||||||
w, h = map(
|
|
||||||
lambda x: x - x % 64, (width, height)
|
|
||||||
) # resize to integer multiple of 64
|
|
||||||
if h != height or w != width:
|
|
||||||
if log:
|
|
||||||
print(
|
|
||||||
f'>> Provided width and height must be multiples of 64. Auto-resizing to {w}x{h}'
|
|
||||||
)
|
|
||||||
height = h
|
|
||||||
width = w
|
|
||||||
resize_needed = True
|
|
||||||
|
|
||||||
if (width * height) > (self.width * self.height):
|
|
||||||
print(">> This input is larger than your defaults. If you run out of memory, please use a smaller image.")
|
|
||||||
|
|
||||||
return width, height, resize_needed
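
A quick worked example of the rounding above (request size is illustrative):

w, h = map(lambda x: x - x % 64, (1000, 600))   # -> (960, 576); resize_needed would be True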
|
|
||||||
|
|
||||||
|
|
||||||
def slerp(self, t, v0, v1, DOT_THRESHOLD=0.9995):
|
|
||||||
'''
|
|
||||||
Spherical linear interpolation
|
|
||||||
Args:
|
|
||||||
t (float/np.ndarray): Float value between 0.0 and 1.0
|
|
||||||
v0 (np.ndarray): Starting vector
|
|
||||||
v1 (np.ndarray): Final vector
|
|
||||||
DOT_THRESHOLD (float): Threshold for considering the two vectors as
|
|
||||||
colineal. Not recommended to alter this.
|
|
||||||
Returns:
|
|
||||||
v2 (np.ndarray): Interpolation vector between v0 and v1
|
|
||||||
'''
|
|
||||||
inputs_are_torch = False
|
|
||||||
if not isinstance(v0, np.ndarray):
|
|
||||||
inputs_are_torch = True
|
|
||||||
v0 = v0.detach().cpu().numpy()
|
|
||||||
if not isinstance(v1, np.ndarray):
|
|
||||||
inputs_are_torch = True
|
|
||||||
v1 = v1.detach().cpu().numpy()
|
|
||||||
|
|
||||||
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
|
|
||||||
if np.abs(dot) > DOT_THRESHOLD:
|
|
||||||
v2 = (1 - t) * v0 + t * v1
|
|
||||||
else:
|
|
||||||
theta_0 = np.arccos(dot)
|
|
||||||
sin_theta_0 = np.sin(theta_0)
|
|
||||||
theta_t = theta_0 * t
|
|
||||||
sin_theta_t = np.sin(theta_t)
|
|
||||||
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
|
|
||||||
s1 = sin_theta_t / sin_theta_0
|
|
||||||
v2 = s0 * v0 + s1 * v1
|
|
||||||
|
|
||||||
if inputs_are_torch:
|
|
||||||
v2 = torch.from_numpy(v2).to(self.device)
|
|
||||||
|
|
||||||
return v2
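
A standalone numeric check of the formula above (assumes plain numpy inputs): for orthogonal unit vectors the spherical interpolant keeps unit length, which is why it is preferred over a straight lerp when blending noise tensors.

import numpy as np

v0, v1, t = np.array([1.0, 0.0]), np.array([0.0, 1.0]), 0.5
theta = np.arccos(np.dot(v0, v1))                                        # pi/2
v2 = (np.sin((1 - t) * theta) * v0 + np.sin(t * theta) * v1) / np.sin(theta)
print(v2, np.linalg.norm(v2))                                            # [0.7071 0.7071] 1.0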
|
|
||||||
|
32
requirements-lin.txt
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
albumentations==0.4.3
|
||||||
|
einops==0.3.0
|
||||||
|
huggingface-hub==0.8.1
|
||||||
|
imageio-ffmpeg==0.4.2
|
||||||
|
imageio==2.9.0
|
||||||
|
kornia==0.6.0
|
||||||
|
# pip will resolve the version which matches torch
|
||||||
|
numpy
|
||||||
|
omegaconf==2.1.1
|
||||||
|
opencv-python==4.6.0.66
|
||||||
|
pillow==9.2.0
|
||||||
|
pip>=22
|
||||||
|
pudb==2019.2
|
||||||
|
pytorch-lightning==1.4.2
|
||||||
|
streamlit==1.12.0
|
||||||
|
# "CompVis/taming-transformers" doesn't work
|
||||||
|
# ldm\models\autoencoder.py", line 6, in <module>
|
||||||
|
# from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
|
||||||
|
# ModuleNotFoundError
|
||||||
|
taming-transformers-rom1504==0.0.6
|
||||||
|
test-tube>=0.7.5
|
||||||
|
torch-fidelity==0.3.0
|
||||||
|
torchmetrics==0.6.0
|
||||||
|
transformers==4.19.2
|
||||||
|
git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||||
|
git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion
|
||||||
|
# No CUDA in PyPi builds
|
||||||
|
--extra-index-url https://download.pytorch.org/whl/cu113 --trusted-host https://download.pytorch.org
|
||||||
|
torch==1.11.0
|
||||||
|
# Same as numpy - let pip do its thing
|
||||||
|
torchvision
|
||||||
|
-e .
|
@ -4,12 +4,13 @@ huggingface-hub==0.8.1
|
|||||||
imageio==2.9.0
|
imageio==2.9.0
|
||||||
imageio-ffmpeg==0.4.2
|
imageio-ffmpeg==0.4.2
|
||||||
kornia==0.6.0
|
kornia==0.6.0
|
||||||
numpy==1.19.2
|
numpy==1.23.1
|
||||||
|
--pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cpu
|
||||||
omegaconf==2.1.1
|
omegaconf==2.1.1
|
||||||
opencv-python==4.1.2.30
|
opencv-python==4.6.0.66
|
||||||
pillow==9.2.0
|
pillow==9.2.0
|
||||||
pudb==2019.2
|
pudb==2019.2
|
||||||
torch==1.11.0
|
torch==1.12.1
|
||||||
torchvision==0.12.0
|
torchvision==0.12.0
|
||||||
pytorch-lightning==1.4.2
|
pytorch-lightning==1.4.2
|
||||||
streamlit==1.12.0
|
streamlit==1.12.0
|
||||||
@ -19,4 +20,4 @@ torchmetrics==0.6.0
|
|||||||
transformers==4.19.2
|
transformers==4.19.2
|
||||||
-e git+https://github.com/openai/CLIP.git@main#egg=clip
|
-e git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||||
-e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
|
-e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
|
||||||
-e git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion
|
-e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
|
32
requirements-win.txt
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
albumentations==0.4.3
|
||||||
|
einops==0.3.0
|
||||||
|
huggingface-hub==0.8.1
|
||||||
|
imageio-ffmpeg==0.4.2
|
||||||
|
imageio==2.9.0
|
||||||
|
kornia==0.6.0
|
||||||
|
# pip will resolve the version which matches torch
|
||||||
|
numpy
|
||||||
|
omegaconf==2.1.1
|
||||||
|
opencv-python==4.6.0.66
|
||||||
|
pillow==9.2.0
|
||||||
|
pip>=22
|
||||||
|
pudb==2019.2
|
||||||
|
pytorch-lightning==1.4.2
|
||||||
|
streamlit==1.12.0
|
||||||
|
# "CompVis/taming-transformers" doesn't work
|
||||||
|
# ldm\models\autoencoder.py", line 6, in <module>
|
||||||
|
# from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
|
||||||
|
# ModuleNotFoundError
|
||||||
|
taming-transformers-rom1504==0.0.6
|
||||||
|
test-tube>=0.7.5
|
||||||
|
torch-fidelity==0.3.0
|
||||||
|
torchmetrics==0.6.0
|
||||||
|
transformers==4.19.2
|
||||||
|
git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||||
|
git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion
|
||||||
|
# No CUDA in PyPi builds
|
||||||
|
--extra-index-url https://download.pytorch.org/whl/cu113 --trusted-host https://download.pytorch.org
|
||||||
|
torch==1.11.0
|
||||||
|
# Same as numpy - let pip do its thing
|
||||||
|
torchvision
|
||||||
|
-e .
|
129
scripts/dream.py
@ -10,36 +10,38 @@ import copy
|
|||||||
import warnings
|
import warnings
|
||||||
import time
|
import time
|
||||||
sys.path.insert(0, '.')
|
sys.path.insert(0, '.')
|
||||||
from ldm.dream.devices import choose_torch_device
|
|
||||||
import ldm.dream.readline
|
import ldm.dream.readline
|
||||||
from ldm.dream.pngwriter import PngWriter, PromptFormatter
|
from ldm.dream.pngwriter import PngWriter, PromptFormatter
|
||||||
from ldm.dream.server import DreamServer, ThreadingDreamServer
|
from ldm.dream.server import DreamServer, ThreadingDreamServer
|
||||||
from ldm.dream.image_util import make_grid
|
from ldm.dream.image_util import make_grid
|
||||||
|
from omegaconf import OmegaConf
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
"""Initialize command-line parsers and the diffusion model"""
|
"""Initialize command-line parsers and the diffusion model"""
|
||||||
arg_parser = create_argv_parser()
|
arg_parser = create_argv_parser()
|
||||||
opt = arg_parser.parse_args()
|
opt = arg_parser.parse_args()
|
||||||
|
|
||||||
if opt.laion400m:
|
if opt.laion400m:
|
||||||
# defaults suitable to the older latent diffusion weights
|
print('--laion400m flag has been deprecated. Please use --model laion400m instead.')
|
||||||
width = 256
|
sys.exit(-1)
|
||||||
height = 256
|
if opt.weights != 'model':
|
||||||
config = 'configs/latent-diffusion/txt2img-1p4B-eval.yaml'
|
print('--weights argument has been deprecated. Please configure ./configs/models.yaml, and call it using --model instead.')
|
||||||
weights = 'models/ldm/text2img-large/model.ckpt'
|
sys.exit(-1)
|
||||||
else:
|
|
||||||
# some defaults suitable for stable diffusion weights
|
try:
|
||||||
width = 512
|
models = OmegaConf.load(opt.config)
|
||||||
height = 512
|
width = models[opt.model].width
|
||||||
config = 'configs/stable-diffusion/v1-inference.yaml'
|
height = models[opt.model].height
|
||||||
if '.ckpt' in opt.weights:
|
config = models[opt.model].config
|
||||||
weights = opt.weights
|
weights = models[opt.model].weights
|
||||||
else:
|
except (FileNotFoundError, IOError, KeyError) as e:
|
||||||
weights = f'models/ldm/stable-diffusion-v1/{opt.weights}.ckpt'
|
print(f'{e}. Aborting.')
|
||||||
|
sys.exit(-1)
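
A minimal sketch of the lookup the new code performs; the key names (width, height, config, weights) are taken from the lines above, while the file contents are an assumption about what configs/models.yaml might hold.

from omegaconf import OmegaConf

models = OmegaConf.create({
    'stable-diffusion-1.4': {
        'width':   512,
        'height':  512,
        'config':  'configs/stable-diffusion/v1-inference.yaml',
        'weights': 'models/ldm/stable-diffusion-v1/model.ckpt',
    }
})
print(models['stable-diffusion-1.4'].width)   # 512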
|
||||||
|
|
||||||
print('* Initializing, be patient...\n')
|
print('* Initializing, be patient...\n')
|
||||||
sys.path.append('.')
|
sys.path.append('.')
|
||||||
from pytorch_lightning import logging
|
from pytorch_lightning import logging
|
||||||
from ldm.simplet2i import T2I
|
from ldm.generate import Generate
|
||||||
|
|
||||||
# these two lines prevent a horrible warning message from appearing
|
# these two lines prevent a horrible warning message from appearing
|
||||||
# when the frozen CLIP tokenizer is imported
|
# when the frozen CLIP tokenizer is imported
|
||||||
@ -51,18 +53,18 @@ def main():
|
|||||||
# defaults passed on the command line.
|
# defaults passed on the command line.
|
||||||
# additional parameters will be added (or overridden) during
|
# additional parameters will be added (or overridden) during
|
||||||
# the user input loop
|
# the user input loop
|
||||||
t2i = T2I(
|
t2i = Generate(
|
||||||
width=width,
|
width = width,
|
||||||
height=height,
|
height = height,
|
||||||
sampler_name=opt.sampler_name,
|
sampler_name = opt.sampler_name,
|
||||||
weights=weights,
|
weights = weights,
|
||||||
full_precision=opt.full_precision,
|
full_precision = opt.full_precision,
|
||||||
config=config,
|
config = config,
|
||||||
grid = opt.grid,
|
grid = opt.grid,
|
||||||
# this is solely for recreating the prompt
|
# this is solely for recreating the prompt
|
||||||
latent_diffusion_weights=opt.laion400m,
|
seamless = opt.seamless,
|
||||||
embedding_path=opt.embedding_path,
|
embedding_path = opt.embedding_path,
|
||||||
device_type=opt.device
|
device_type = opt.device
|
||||||
)
|
)
|
||||||
|
|
||||||
# make sure the output directory exists
|
# make sure the output directory exists
|
||||||
@ -86,6 +88,9 @@ def main():
|
|||||||
print(f'{e}. Aborting.')
|
print(f'{e}. Aborting.')
|
||||||
sys.exit(-1)
|
sys.exit(-1)
|
||||||
|
|
||||||
|
if opt.seamless:
|
||||||
|
print(">> changed to seamless tiling mode")
|
||||||
|
|
||||||
# preload the model
|
# preload the model
|
||||||
tic = time.time()
|
tic = time.time()
|
||||||
t2i.load_model()
|
t2i.load_model()
|
||||||
@ -100,7 +105,7 @@ def main():
|
|||||||
|
|
||||||
cmd_parser = create_cmd_parser()
|
cmd_parser = create_cmd_parser()
|
||||||
if opt.web:
|
if opt.web:
|
||||||
dream_server_loop(t2i)
|
dream_server_loop(t2i, opt.host, opt.port, opt.outdir)
|
||||||
else:
|
else:
|
||||||
main_loop(t2i, opt.outdir, opt.prompt_as_dir, cmd_parser, infile)
|
main_loop(t2i, opt.outdir, opt.prompt_as_dir, cmd_parser, infile)
|
||||||
|
|
||||||
@ -188,8 +193,8 @@ def main_loop(t2i, outdir, prompt_as_dir, parser, infile):
|
|||||||
# shotgun parsing, woo
|
# shotgun parsing, woo
|
||||||
parts = []
|
parts = []
|
||||||
broken = False # python doesn't have labeled loops...
|
broken = False # python doesn't have labeled loops...
|
||||||
for part in opt.with_variations.split(';'):
|
for part in opt.with_variations.split(','):
|
||||||
seed_and_weight = part.split(',')
|
seed_and_weight = part.split(':')
|
||||||
if len(seed_and_weight) != 2:
|
if len(seed_and_weight) != 2:
|
||||||
print(f'could not parse with_variation part "{part}"')
|
print(f'could not parse with_variation part "{part}"')
|
||||||
broken = True
|
broken = True
|
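
A minimal sketch of the new --with_variations syntax being split above (seed and weight values are illustrative):

parts = '1234:0.2,5678:0.5'.split(',')                                   # ['1234:0.2', '5678:0.5']
variations = [(int(s), float(w)) for s, w in (p.split(':') for p in parts)]
# -> [(1234, 0.2), (5678, 0.5)]  i.e. (seed, weight) pairs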
||||||
@ -311,7 +316,7 @@ def get_next_command(infile=None) -> str: #command string
|
|||||||
print(f'#{command}')
|
print(f'#{command}')
|
||||||
return command
|
return command
|
||||||
|
|
||||||
def dream_server_loop(t2i):
|
def dream_server_loop(t2i, host, port, outdir):
|
||||||
print('\n* --web was specified, starting web server...')
|
print('\n* --web was specified, starting web server...')
|
||||||
# Change working directory to the stable-diffusion directory
|
# Change working directory to the stable-diffusion directory
|
||||||
os.chdir(
|
os.chdir(
|
||||||
@ -320,9 +325,14 @@ def dream_server_loop(t2i):
|
|||||||
|
|
||||||
# Start server
|
# Start server
|
||||||
DreamServer.model = t2i
|
DreamServer.model = t2i
|
||||||
dream_server = ThreadingDreamServer(("0.0.0.0", 9090))
|
DreamServer.outdir = outdir
|
||||||
print("\nStarted Stable Diffusion dream server!")
|
dream_server = ThreadingDreamServer((host, port))
|
||||||
print("Point your browser at http://localhost:9090 or use the host's DNS name or IP address.")
|
print(">> Started Stable Diffusion dream server!")
|
||||||
|
if host == '0.0.0.0':
|
||||||
|
print(f"Point your browser at http://localhost:{port} or use the host's DNS name or IP address.")
|
||||||
|
else:
|
||||||
|
print(">> Default host address now 127.0.0.1 (localhost). Use --host 0.0.0.0 to bind any address.")
|
||||||
|
print(f">> Point your browser at http://{host}:{port}.")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
dream_server.serve_forever()
|
dream_server.serve_forever()
|
||||||
@ -388,9 +398,7 @@ def create_argv_parser():
|
|||||||
'--full_precision',
|
'--full_precision',
|
||||||
dest='full_precision',
|
dest='full_precision',
|
||||||
action='store_true',
|
action='store_true',
|
||||||
help='Use slower full precision math for calculations',
|
help='Use more memory-intensive full precision math for calculations',
|
||||||
# MPS only functions with full precision, see https://github.com/lstein/stable-diffusion/issues/237
|
|
||||||
default=choose_torch_device() == 'mps',
|
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'-g',
|
'-g',
|
||||||
@ -415,6 +423,11 @@ def create_argv_parser():
|
|||||||
default='outputs/img-samples',
|
default='outputs/img-samples',
|
||||||
help='Directory to save generated images and a log of prompts and seeds. Default: outputs/img-samples',
|
help='Directory to save generated images and a log of prompts and seeds. Default: outputs/img-samples',
|
||||||
)
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
'--seamless',
|
||||||
|
action='store_true',
|
||||||
|
help='Change the model to seamless tiling (circular) mode',
|
||||||
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--embedding_path',
|
'--embedding_path',
|
||||||
type=str,
|
type=str,
|
||||||
@ -431,7 +444,7 @@ def create_argv_parser():
|
|||||||
'--gfpgan_bg_upsampler',
|
'--gfpgan_bg_upsampler',
|
||||||
type=str,
|
type=str,
|
||||||
default='realesrgan',
|
default='realesrgan',
|
||||||
help='Background upsampler. Default: realesrgan. Options: realesrgan, none. Only used if --gfpgan is specified',
|
help='Background upsampler. Default: realesrgan. Options: realesrgan, none.',
|
||||||
|
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
@ -458,6 +471,18 @@ def create_argv_parser():
|
|||||||
action='store_true',
|
action='store_true',
|
||||||
help='Start in web server mode.',
|
help='Start in web server mode.',
|
||||||
)
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
'--host',
|
||||||
|
type=str,
|
||||||
|
default='127.0.0.1',
|
||||||
|
help='Web server: Host or IP to listen on. Set to 0.0.0.0 to accept traffic from other devices on your network.'
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
'--port',
|
||||||
|
type=int,
|
||||||
|
default='9090',
|
||||||
|
help='Web server: Port to listen on'
|
||||||
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--weights',
|
'--weights',
|
||||||
default='model',
|
default='model',
|
||||||
@ -470,6 +495,16 @@ def create_argv_parser():
|
|||||||
default='cuda',
|
default='cuda',
|
||||||
help="device to run stable diffusion on. defaults to cuda `torch.cuda.current_device()` if available"
|
help="device to run stable diffusion on. defaults to cuda `torch.cuda.current_device()` if available"
|
||||||
)
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
'--model',
|
||||||
|
default='stable-diffusion-1.4',
|
||||||
|
help='Indicates which diffusion model to load. (currently "stable-diffusion-1.4" (default) or "laion400m")',
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
'--config',
|
||||||
|
default ='configs/models.yaml',
|
||||||
|
help ='Path to configuration file for alternate models.',
|
||||||
|
)
|
||||||
return parser
|
return parser
|
||||||
|
|
||||||
|
|
||||||
@ -515,6 +550,11 @@ def create_cmd_parser():
|
|||||||
default=None,
|
default=None,
|
||||||
help='Directory to save generated images and a log of prompts and seeds',
|
help='Directory to save generated images and a log of prompts and seeds',
|
||||||
)
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
'--seamless',
|
||||||
|
action='store_true',
|
||||||
|
help='Change the model to seamless tiling (circular) mode',
|
||||||
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'-i',
|
'-i',
|
||||||
'--individual',
|
'--individual',
|
||||||
@ -527,6 +567,17 @@ def create_cmd_parser():
|
|||||||
type=str,
|
type=str,
|
||||||
help='Path to input image for img2img mode (supersedes width and height)',
|
help='Path to input image for img2img mode (supersedes width and height)',
|
||||||
)
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
'-M',
|
||||||
|
'--mask',
|
||||||
|
type=str,
|
||||||
|
help='Path to inpainting mask; transparent areas will be painted over',
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
'--invert_mask',
|
||||||
|
action='store_true',
|
||||||
|
help='Invert the inpainting mask; opaque areas will be painted over',
|
||||||
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'-T',
|
'-T',
|
||||||
'-fit',
|
'-fit',
|
||||||
@ -611,7 +662,7 @@ def create_cmd_parser():
|
|||||||
'--with_variations',
|
'--with_variations',
|
||||||
default=None,
|
default=None,
|
||||||
type=str,
|
type=str,
|
||||||
help='list of variations to apply, in the format `seed,weight;seed,weight;...'
|
help='list of variations to apply, in the format `seed:weight,seed:weight,...'
|
||||||
)
|
)
|
||||||
return parser
|
return parser
|
||||||
|
|
||||||
|
@ -1,11 +1,14 @@
|
|||||||
* {
|
* {
|
||||||
font-family: 'Arial';
|
font-family: 'Arial';
|
||||||
|
font-size: 100%;
|
||||||
}
|
}
|
||||||
#header {
|
body {
|
||||||
text-decoration: dotted underline;
|
font-size: 1em;
|
||||||
}
|
}
|
||||||
#search {
|
textarea {
|
||||||
margin-top: 20vh;
|
font-size: 0.95em;
|
||||||
|
}
|
||||||
|
header, form, #progress-section {
|
||||||
margin-left: auto;
|
margin-left: auto;
|
||||||
margin-right: auto;
|
margin-right: auto;
|
||||||
max-width: 1024px;
|
max-width: 1024px;
|
||||||
@ -13,46 +16,78 @@
|
|||||||
}
|
}
|
||||||
fieldset {
|
fieldset {
|
||||||
border: none;
|
border: none;
|
||||||
|
line-height: 2.2em;
|
||||||
|
}
|
||||||
|
select, input {
|
||||||
|
margin-right: 10px;
|
||||||
|
padding: 2px;
|
||||||
|
}
|
||||||
|
input[type=submit] {
|
||||||
|
background-color: #666;
|
||||||
|
color: white;
|
||||||
|
}
|
||||||
|
input[type=checkbox] {
|
||||||
|
margin-right: 0px;
|
||||||
|
width: 20px;
|
||||||
|
height: 20px;
|
||||||
|
vertical-align: middle;
|
||||||
|
}
|
||||||
|
input#seed {
|
||||||
|
margin-right: 0px;
|
||||||
}
|
}
|
||||||
div {
|
div {
|
||||||
padding: 10px 10px 10px 10px;
|
padding: 10px 10px 10px 10px;
|
||||||
}
|
}
|
||||||
#fieldset-search {
|
header {
|
||||||
|
margin-bottom: 16px;
|
||||||
|
}
|
||||||
|
header h1 {
|
||||||
|
margin-bottom: 0;
|
||||||
|
font-size: 2em;
|
||||||
|
}
|
||||||
|
#search-box {
|
||||||
display: flex;
|
display: flex;
|
||||||
}
|
}
|
||||||
#scaling-inprocess-message{
|
#scaling-inprocess-message {
|
||||||
font-weight: bold;
|
font-weight: bold;
|
||||||
font-style: italic;
|
font-style: italic;
|
||||||
display: none;
|
display: none;
|
||||||
}
|
}
|
||||||
#prompt {
|
#prompt {
|
||||||
flex-grow: 1;
|
flex-grow: 1;
|
||||||
|
|
||||||
border-radius: 20px 0px 0px 20px;
|
|
||||||
padding: 5px 10px 5px 10px;
|
padding: 5px 10px 5px 10px;
|
||||||
border: 1px solid black;
|
border: 1px solid #999;
|
||||||
border-right: none;
|
|
||||||
outline: none;
|
outline: none;
|
||||||
}
|
}
|
||||||
#submit {
|
#submit {
|
||||||
border-radius: 0px 20px 20px 0px;
|
|
||||||
padding: 5px 10px 5px 10px;
|
padding: 5px 10px 5px 10px;
|
||||||
border: 1px solid black;
|
border: 1px solid #999;
|
||||||
}
|
}
|
||||||
#reset-all {
|
#reset-all, #remove-image {
|
||||||
|
margin-top: 12px;
|
||||||
|
font-size: 0.8em;
|
||||||
background-color: pink;
|
background-color: pink;
|
||||||
|
border: 1px solid #999;
|
||||||
|
border-radius: 4px;
|
||||||
}
|
}
|
||||||
#results {
|
#results {
|
||||||
text-align: center;
|
text-align: center;
|
||||||
max-width: 1000px;
|
|
||||||
margin: auto;
|
margin: auto;
|
||||||
padding-top: 10px;
|
padding-top: 10px;
|
||||||
}
|
}
|
||||||
#results img {
|
#results figure {
|
||||||
|
display: inline-block;
|
||||||
|
margin: 10px;
|
||||||
|
}
|
||||||
|
#results figcaption {
|
||||||
|
font-size: 0.8em;
|
||||||
|
padding: 3px;
|
||||||
|
color: #888;
|
||||||
cursor: pointer;
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
#results img {
|
||||||
height: 30vh;
|
height: 30vh;
|
||||||
border-radius: 5px;
|
border-radius: 5px;
|
||||||
margin: 10px;
|
|
||||||
}
|
}
|
||||||
#fieldset-config {
|
#fieldset-config {
|
||||||
line-height:2em;
|
line-height:2em;
|
||||||
@ -63,8 +98,15 @@ input[type="number"] {
|
|||||||
#seed {
|
#seed {
|
||||||
width: 150px;
|
width: 150px;
|
||||||
}
|
}
|
||||||
hr {
|
button#reset-seed {
|
||||||
width: 200px;
|
font-size: 1.7em;
|
||||||
|
background: #efefef;
|
||||||
|
border: 1px solid #999;
|
||||||
|
border-radius: 4px;
|
||||||
|
line-height: 0.8;
|
||||||
|
margin: 0 10px 0 0;
|
||||||
|
padding: 0 5px 3px;
|
||||||
|
vertical-align: middle;
|
||||||
}
|
}
|
||||||
label {
|
label {
|
||||||
white-space: nowrap;
|
white-space: nowrap;
|
||||||
@ -92,6 +134,4 @@ label {
|
|||||||
#progress-section {
|
#progress-section {
|
||||||
background-color: #F5F5F5;
|
background-color: #F5F5F5;
|
||||||
}
|
}
|
||||||
#about {
|
|
||||||
background-color: #DCDCDC;
|
|
||||||
}
|
|
||||||
|
@ -10,77 +10,87 @@
|
|||||||
<script src="static/dream_web/index.js"></script>
|
<script src="static/dream_web/index.js"></script>
|
||||||
</head>
|
</head>
|
||||||
<body>
|
<body>
|
||||||
<div id="search">
|
<header>
|
||||||
<h2 id="header">Stable Diffusion Dream Server</h2>
|
<h1>Stable Diffusion Dream Server</h1>
|
||||||
|
<div id="about">
|
||||||
|
For news and support for this web service, visit our <a href="http://github.com/lstein/stable-diffusion">GitHub site</a>
|
||||||
|
</div>
|
||||||
|
</header>
|
||||||
|
|
||||||
|
<main>
|
||||||
<form id="generate-form" method="post" action="#">
|
<form id="generate-form" method="post" action="#">
|
||||||
<div id="txt2img">
|
<fieldset id="txt2img">
|
||||||
<fieldset id="fieldset-search">
|
<div id="search-box">
|
||||||
<input type="text" id="prompt" name="prompt">
|
<textarea rows="3" id="prompt" name="prompt"></textarea>
|
||||||
<input type="submit" id="submit" value="Generate">
|
<input type="submit" id="submit" value="Generate">
|
||||||
</fieldset>
|
</div>
|
||||||
<fieldset id="fieldset-config">
|
</fieldset>
|
||||||
<label for="iterations">Images to generate:</label>
|
<fieldset id="fieldset-config">
|
||||||
<input value="1" type="number" id="iterations" name="iterations" size="4">
|
<label for="iterations">Images to generate:</label>
|
||||||
<label for="steps">Steps:</label>
|
<input value="1" type="number" id="iterations" name="iterations" size="4">
|
||||||
<input value="50" type="number" id="steps" name="steps">
|
<label for="steps">Steps:</label>
|
||||||
<label for="cfgscale">Cfg Scale:</label>
|
<input value="50" type="number" id="steps" name="steps">
|
||||||
<input value="7.5" type="number" id="cfgscale" name="cfgscale" step="any">
|
<label for="cfgscale">Cfg Scale:</label>
|
||||||
<label for="sampler">Sampler:</label>
|
<input value="7.5" type="number" id="cfgscale" name="cfgscale" step="any">
|
||||||
<select id="sampler" name="sampler" value="k_lms">
|
<label for="sampler">Sampler:</label>
|
||||||
<option value="ddim">DDIM</option>
|
<select id="sampler" name="sampler" value="k_lms">
|
||||||
<option value="plms">PLMS</option>
|
<option value="ddim">DDIM</option>
|
||||||
<option value="k_lms" selected>KLMS</option>
|
<option value="plms">PLMS</option>
|
||||||
<option value="k_dpm_2">KDPM_2</option>
|
<option value="k_lms" selected>KLMS</option>
|
||||||
<option value="k_dpm_2_a">KDPM_2A</option>
|
<option value="k_dpm_2">KDPM_2</option>
|
||||||
<option value="k_euler">KEULER</option>
|
<option value="k_dpm_2_a">KDPM_2A</option>
|
||||||
<option value="k_euler_a">KEULER_A</option>
|
<option value="k_euler">KEULER</option>
|
||||||
<option value="k_heun">KHEUN</option>
|
<option value="k_euler_a">KEULER_A</option>
|
||||||
</select>
|
<option value="k_heun">KHEUN</option>
|
||||||
<br>
|
</select>
|
||||||
<label title="Set to multiple of 64" for="width">Width:</label>
|
<input type="checkbox" name="seamless" id="seamless">
|
||||||
<select id="width" name="width" value="512">
|
<label for="seamless">Seamless circular tiling</label>
|
||||||
<option value="64">64</option> <option value="128">128</option>
|
<br>
|
||||||
<option value="192">192</option> <option value="256">256</option>
|
<label title="Set to multiple of 64" for="width">Width:</label>
|
||||||
<option value="320">320</option> <option value="384">384</option>
|
<select id="width" name="width" value="512">
|
||||||
<option value="448">448</option> <option value="512" selected>512</option>
|
<option value="64">64</option> <option value="128">128</option>
|
||||||
<option value="576">576</option> <option value="640">640</option>
|
<option value="192">192</option> <option value="256">256</option>
|
||||||
<option value="704">704</option> <option value="768">768</option>
|
<option value="320">320</option> <option value="384">384</option>
|
||||||
<option value="832">832</option> <option value="896">896</option>
|
<option value="448">448</option> <option value="512" selected>512</option>
|
||||||
<option value="960">960</option> <option value="1024">1024</option>
|
<option value="576">576</option> <option value="640">640</option>
|
||||||
</select>
|
<option value="704">704</option> <option value="768">768</option>
|
||||||
<label title="Set to multiple of 64" for="height">Height:</label>
|
<option value="832">832</option> <option value="896">896</option>
|
||||||
<select id="height" name="height" value="512">
|
<option value="960">960</option> <option value="1024">1024</option>
|
||||||
<option value="64">64</option> <option value="128">128</option>
|
</select>
|
||||||
<option value="192">192</option> <option value="256">256</option>
|
<label title="Set to multiple of 64" for="height">Height:</label>
|
||||||
<option value="320">320</option> <option value="384">384</option>
|
<select id="height" name="height" value="512">
|
||||||
<option value="448">448</option> <option value="512" selected>512</option>
|
<option value="64">64</option> <option value="128">128</option>
|
||||||
<option value="576">576</option> <option value="640">640</option>
|
<option value="192">192</option> <option value="256">256</option>
|
||||||
<option value="704">704</option> <option value="768">768</option>
|
<option value="320">320</option> <option value="384">384</option>
|
||||||
<option value="832">832</option> <option value="896">896</option>
|
<option value="448">448</option> <option value="512" selected>512</option>
|
||||||
<option value="960">960</option> <option value="1024">1024</option>
|
<option value="576">576</option> <option value="640">640</option>
|
||||||
</select>
|
<option value="704">704</option> <option value="768">768</option>
|
||||||
<label title="Set to -1 for random seed" for="seed">Seed:</label>
|
<option value="832">832</option> <option value="896">896</option>
|
||||||
<input value="-1" type="number" id="seed" name="seed">
|
<option value="960">960</option> <option value="1024">1024</option>
|
||||||
<button type="button" id="reset-seed">↺</button>
|
</select>
|
||||||
<label title="Threshold" for="threshold">Threshold:</label>
|
|
||||||
<input value="0" type="number" id="threshold" name="threshold" step="any">
|
<label title="Set to -1 for random seed" for="seed">Seed:</label>
|
||||||
<label title="Perlin" for="perlin">Perlin:</label>
|
<input value="-1" type="number" id="seed" name="seed">
|
||||||
<input value="0" type="number" id="perlin" name="perlin" step="any">
|
<button type="button" id="reset-seed">↺</button>
|
||||||
<input type="checkbox" name="progress_images" id="progress_images">
|
<input type="checkbox" name="progress_images" id="progress_images">
|
||||||
<label for="progress_images">Display in-progress images (slows down generation):</label>
|
<label for="progress_images">Display in-progress images (slower):</label>
|
||||||
<button type="button" id="reset-all">Reset to Defaults</button>
|
<label title="Threshold" for="threshold">Threshold:</label>
|
||||||
</div>
|
<input value="0" type="number" id="threshold" name="threshold" step="any">
|
||||||
<div id="img2img">
|
<label title="Perlin" for="perlin">Perlin:</label>
|
||||||
<label title="Upload an image to use img2img" for="initimg">Initial image:</label>
|
<input value="0" type="number" id="perlin" name="perlin" step="any">
|
||||||
<input type="file" id="initimg" name="initimg" accept=".jpg, .jpeg, .png">
|
<button type="button" id="reset-all">Reset to Defaults</button>
|
||||||
|
</fieldset>
|
||||||
|
<fieldset id="img2img">
|
||||||
|
<label title="Upload an image to use img2img" for="initimg">Initial image:</label>
|
||||||
|
<input type="file" id="initimg" name="initimg" accept=".jpg, .jpeg, .png">
|
||||||
|
<button type="button" id="remove-image">Remove Image</button>
|
||||||
<br>
|
<br>
|
||||||
<label for="strength">Img2Img Strength:</label>
|
<label for="strength">Img2Img Strength:</label>
|
||||||
<input value="0.75" type="number" id="strength" name="strength" step="0.01" min="0" max="1">
|
<input value="0.75" type="number" id="strength" name="strength" step="0.01" min="0" max="1">
|
||||||
<input type="checkbox" id="fit" name="fit" checked>
|
<input type="checkbox" id="fit" name="fit" checked>
|
||||||
<label title="Rescale image to fit within requested width and height" for="fit">Fit to width/height:</label>
|
<label title="Rescale image to fit within requested width and height" for="fit">Fit to width/height:</label>
|
||||||
</div>
|
</fieldset>
|
||||||
<div id="gfpgan">
|
<fieldset id="gfpgan">
|
||||||
<label title="Strength of the gfpgan (face fixing) algorithm." for="gfpgan_strength">GPFGAN Strength (0 to disable):</label>
|
<label title="Strength of the gfpgan (face fixing) algorithm." for="gfpgan_strength">GPFGAN Strength (0 to disable):</label>
|
||||||
<input value="0.8" min="0" max="1" type="number" id="gfpgan_strength" name="gfpgan_strength" step="0.05">
|
<input value="0.8" min="0" max="1" type="number" id="gfpgan_strength" name="gfpgan_strength" step="0.05">
|
||||||
<label title="Upscaling to perform using ESRGAN." for="upscale_level">Upscaling Level</label>
|
<label title="Upscaling to perform using ESRGAN." for="upscale_level">Upscaling Level</label>
|
||||||
@@ -91,25 +101,26 @@
 </select>
 <label title="Strength of the esrgan (upscaling) algorithm." for="upscale_strength">Upscale Strength:</label>
 <input value="0.75" min="0" max="1" type="number" id="upscale_strength" name="upscale_strength" step="0.05">
-</div>
 </fieldset>
 </form>
-<div id="about">For news and support for this web service, visit our <a href="http://github.com/lstein/stable-diffusion">GitHub site</a></div>
 <br>
-<div id="progress-section">
+<section id="progress-section">
+<div id="progress-container">
 <progress id="progress-bar" value="0" max="1"></progress>
 <span id="cancel-button" title="Cancel">✖</span>
 <br>
 <img id="progress-image" src='data:image/svg+xml,<svg xmlns="http://www.w3.org/2000/svg"/>'></img>
 <div id="scaling-inprocess-message">
 <i><span>Postprocessing...</span><span id="processing_cnt">1/3</span></i>
 </div>
 </div>
-</div>
+</section>
 <div id="results">
 <div id="no-results-message">
 <i><p>No results...</p></i>
 </div>
 </div>
-</div>
+</main>
 </body>
 </html>
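Every control in the form above is a plain named field, so the whole settings panel can be read in one pass with the FormData API, which is what the script changes below rely on. The sketch that follows is illustrative only: the collectSettings name, the list of fields coerced to numbers, and the /generate endpoint are assumptions rather than part of this commit, while the #generate-form selector and the field names are taken from the markup above.

    // Sketch: gather the named controls into a flat settings object.
    function collectSettings() {
        const form = document.querySelector("#generate-form");
        const settings = Object.fromEntries(new FormData(form));
        // FormData yields strings; coerce the numeric fields before sending.
        for (const key of ["width", "height", "seed", "threshold", "perlin",
                           "strength", "gfpgan_strength", "upscale_strength"]) {
            if (key in settings) settings[key] = Number(settings[key]);
        }
        return settings;
    }

    // Hypothetical usage; the endpoint name does not come from this diff:
    // fetch("/generate", { method: "POST", body: JSON.stringify(collectSettings()) });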
@@ -8,18 +8,25 @@ function toBase64(file) {
 }
 
 function appendOutput(src, seed, config) {
-    let outputNode = document.createElement("img");
-    outputNode.src = src;
+    let outputNode = document.createElement("figure");
 
     let altText = seed.toString() + " | " + config.prompt;
-    outputNode.alt = altText;
-    outputNode.title = altText;
+
+    const figureContents = `
+        <a href="${src}" target="_blank">
+            <img src="${src}" alt="${altText}" title="${altText}">
+        </a>
+        <figcaption>${seed}</figcaption>
+    `;
+
+    outputNode.innerHTML = figureContents;
+    let figcaption = outputNode.querySelector('figcaption')
 
     // Reload image config
-    outputNode.addEventListener('click', () => {
+    figcaption.addEventListener('click', () => {
         let form = document.querySelector("#generate-form");
         for (const [k, v] of new FormData(form)) {
+            if (k == 'initimg') { continue; }
             form.querySelector(`*[name=${k}]`).value = config[k];
         }
         document.querySelector("#seed").value = seed;
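For orientation, a usage sketch for the reworked appendOutput: the (src, seed, config) signature and the #no-results-message placeholder come from this diff, but the handleResult wrapper and the shape of the result object are assumptions, not code from this commit.

    // Sketch only: feed one generation result into the figure-based output list.
    function handleResult(result) {
        // Assumed payload shape: { url: "...", seed: 1234, config: { prompt: "...", ... } }
        const placeholder = document.querySelector("#no-results-message");
        if (placeholder) placeholder.remove();   // clear the "No results..." placeholder
        appendOutput(result.url, result.seed, result.config);
    }

Clicking the figcaption of the node appendOutput now builds copies the stored config back into #generate-form, skips the initimg file field, and restores the seed, which is the behaviour the hunk above adds.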
@@ -59,6 +66,7 @@ async function generateSubmit(form) {
 
     // Convert file data to base64
     let formData = Object.fromEntries(new FormData(form));
+    formData.initimg_name = formData.initimg.name
     formData.initimg = formData.initimg.name !== '' ? await toBase64(formData.initimg) : null;
 
     let strength = formData.strength;
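The toBase64 helper awaited here is only named in the hunk header further up; its body is outside this excerpt. A typical FileReader-based implementation, given as an assumption about what it looks like rather than a quote of the project's code, is:

    // Assumed implementation of the toBase64 helper referenced above.
    function toBase64(file) {
        return new Promise((resolve, reject) => {
            const reader = new FileReader();
            reader.onload = () => resolve(reader.result);   // a data: URL such as "data:image/png;base64,..."
            reader.onerror = (err) => reject(err);
            reader.readAsDataURL(file);
        });
    }

With something like this in place, the submit path above can await the conversion and send the initial image inline with the rest of the form data.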
@@ -147,6 +155,9 @@ window.onload = () => {
     document.querySelector("#reset-all").addEventListener('click', (e) => {
         clearFields(e.target.form);
     });
+    document.querySelector("#remove-image").addEventListener('click', (e) => {
+        initimg.value=null;
+    });
     loadFields(document.querySelector("#generate-form"));
 
     document.querySelector('#cancel-button').addEventListener('click', () => {
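The remove-image handler added above clears the file input through the implicit window.initimg global that browsers expose for elements with an id, and assigning null works because the value property of an input treats null as the empty string. An equivalent, more explicit form, offered as a sketch rather than what the commit does, would be:

    // Sketch: clear the chosen file so no init image is submitted.
    document.querySelector("#remove-image").addEventListener('click', () => {
        document.querySelector("#initimg").value = "";
    });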