From df95a7ddf2e8eb055552617cec258d0cdc371360 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 16 Sep 2022 19:58:16 -0400 Subject: [PATCH 01/11] respect --outdir again; fix issue #628 --- ldm/dream/args.py | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/ldm/dream/args.py b/ldm/dream/args.py index 9201f46dfa..24056d340e 100644 --- a/ldm/dream/args.py +++ b/ldm/dream/args.py @@ -216,9 +216,7 @@ class Args(object): # the arg value. For example, the --grid and --individual options are a little # funny because of their push/pull relationship. This is how to handle it. if name=='grid': - return value_arg or value_cmd # arg supersedes cmd - if name=='individual': - return value_cmd or value_arg # cmd supersedes arg + return not cmd_switches.individual and value_arg # arg supersedes cmd if value_cmd is not None: return value_cmd else: @@ -294,11 +292,6 @@ class Args(object): action='store_true', help='Place images in subdirectories named after the prompt.', ) - render_group.add_argument( - '--seamless', - action='store_true', - help='Change the model to seamless tiling (circular) mode', - ) render_group.add_argument( '--grid', '-g', @@ -416,8 +409,8 @@ class Args(object): help='generate a grid' ) render_group.add_argument( - '--individual', '-i', + '--individual', action='store_true', help='override command-line --grid setting and generate individual images' ) @@ -448,7 +441,6 @@ class Args(object): '--outdir', '-o', type=str, - default='outputs/img-samples', help='Directory to save generated images and a log of prompts and seeds', ) img2img_group.add_argument( From df4c80f177b892b4190b932fc30aa5ec65649608 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 16 Sep 2022 19:58:45 -0400 Subject: [PATCH 02/11] respect --outdir again; fix issue #628 --- .github/workflows/cache-model.yml | 64 ---------------- .github/workflows/create-caches.yml | 70 ++++++++++++++++++ .github/workflows/macos12-miniconda.yml | 80 -------------------- .github/workflows/test-dream-conda.yml | 97 +++++++++++++++++++++++++ docs/installation/INSTALL_MAC.md | 35 ++++++--- ldm/dream/server.py | 3 +- ldm/modules/embedding_manager.py | 4 +- tests/dev_prompts.txt | 1 + tests/preflight_prompts.txt | 9 +++ tests/prompts.txt | 1 - 10 files changed, 206 insertions(+), 158 deletions(-) delete mode 100644 .github/workflows/cache-model.yml create mode 100644 .github/workflows/create-caches.yml delete mode 100644 .github/workflows/macos12-miniconda.yml create mode 100644 .github/workflows/test-dream-conda.yml create mode 100644 tests/dev_prompts.txt create mode 100644 tests/preflight_prompts.txt delete mode 100644 tests/prompts.txt diff --git a/.github/workflows/cache-model.yml b/.github/workflows/cache-model.yml deleted file mode 100644 index 2682943eef..0000000000 --- a/.github/workflows/cache-model.yml +++ /dev/null @@ -1,64 +0,0 @@ -name: Cache Model -on: - workflow_dispatch -jobs: - build: - strategy: - matrix: - os: [ macos-12 ] - name: Create Caches using ${{ matrix.os }} - runs-on: ${{ matrix.os }} - steps: - - name: Checkout sources - uses: actions/checkout@v3 - - name: Cache model - id: cache-sd-v1-4 - uses: actions/cache@v3 - env: - cache-name: cache-sd-v1-4 - with: - path: models/ldm/stable-diffusion-v1/model.ckpt - key: ${{ env.cache-name }} - restore-keys: | - ${{ env.cache-name }} - - name: Download Stable Diffusion v1.4 model - if: ${{ steps.cache-sd-v1-4.outputs.cache-hit != 'true' }} - continue-on-error: true - run: | - if [ ! 
-e models/ldm/stable-diffusion-v1 ]; then - mkdir -p models/ldm/stable-diffusion-v1 - fi - if [ ! -e models/ldm/stable-diffusion-v1/model.ckpt ]; then - curl -o models/ldm/stable-diffusion-v1/model.ckpt ${{ secrets.SD_V1_4_URL }} - fi -# Uncomment this when we no longer make changes to environment-mac.yaml -# - name: Cache environment -# id: cache-conda-env-ldm -# uses: actions/cache@v3 -# env: -# cache-name: cache-conda-env-ldm -# with: -# path: ~/.conda/envs/ldm -# key: ${{ env.cache-name }} -# restore-keys: | -# ${{ env.cache-name }} - - name: Install dependencies -# if: ${{ steps.cache-conda-env-ldm.outputs.cache-hit != 'true' }} - run: | - conda env create -f environment-mac.yaml - - name: Cache hugginface and torch models - id: cache-hugginface-torch - uses: actions/cache@v3 - env: - cache-name: cache-hugginface-torch - with: - path: ~/.cache - key: ${{ env.cache-name }} - restore-keys: | - ${{ env.cache-name }} - - name: Download Huggingface and Torch models - if: ${{ steps.cache-hugginface-torch.outputs.cache-hit != 'true' }} - continue-on-error: true - run: | - export PYTHON_BIN=/usr/local/miniconda/envs/ldm/bin/python - $PYTHON_BIN scripts/preload_models.py \ No newline at end of file diff --git a/.github/workflows/create-caches.yml b/.github/workflows/create-caches.yml new file mode 100644 index 0000000000..951718af1b --- /dev/null +++ b/.github/workflows/create-caches.yml @@ -0,0 +1,70 @@ +name: Create Caches +on: + workflow_dispatch +jobs: + build: + strategy: + matrix: + os: [ ubuntu-latest, macos-12 ] + name: Create Caches on ${{ matrix.os }} conda + runs-on: ${{ matrix.os }} + steps: + - name: Set platform variables + id: vars + run: | + if [ "$RUNNER_OS" = "macOS" ]; then + echo "::set-output name=ENV_FILE::environment-mac.yaml" + echo "::set-output name=PYTHON_BIN::/usr/local/miniconda/envs/ldm/bin/python" + elif [ "$RUNNER_OS" = "Linux" ]; then + echo "::set-output name=ENV_FILE::environment.yaml" + echo "::set-output name=PYTHON_BIN::/usr/share/miniconda/envs/ldm/bin/python" + fi + - name: Checkout sources + uses: actions/checkout@v3 + - name: Use Cached Stable Diffusion v1.4 Model + id: cache-sd-v1-4 + uses: actions/cache@v3 + env: + cache-name: cache-sd-v1-4 + with: + path: models/ldm/stable-diffusion-v1/model.ckpt + key: ${{ env.cache-name }} + restore-keys: | + ${{ env.cache-name }} + - name: Download Stable Diffusion v1.4 Model + if: ${{ steps.cache-sd-v1-4.outputs.cache-hit != 'true' }} + run: | + if [ ! -e models/ldm/stable-diffusion-v1 ]; then + mkdir -p models/ldm/stable-diffusion-v1 + fi + if [ ! 
-e models/ldm/stable-diffusion-v1/model.ckpt ]; then + curl -o models/ldm/stable-diffusion-v1/model.ckpt ${{ secrets.SD_V1_4_URL }} + fi + - name: Use Cached Dependencies + id: cache-conda-env-ldm + uses: actions/cache@v3 + env: + cache-name: cache-conda-env-ldm + with: + path: ~/.conda/envs/ldm + key: ${{ env.cache-name }} + restore-keys: | + ${{ env.cache-name }}-${{ runner.os }}-${{ hashFiles(steps.vars.outputs.ENV_FILE) }} + - name: Install Dependencies + if: ${{ steps.cache-conda-env-ldm.outputs.cache-hit != 'true' }} + run: | + conda env create -f ${{ steps.vars.outputs.ENV_FILE }} + - name: Use Cached Huggingface and Torch models + id: cache-huggingface-torch + uses: actions/cache@v3 + env: + cache-name: cache-huggingface-torch + with: + path: ~/.cache + key: ${{ env.cache-name }} + restore-keys: | + ${{ env.cache-name }}-${{ hashFiles('scripts/preload_models.py') }} + - name: Download Huggingface and Torch models + if: ${{ steps.cache-huggingface-torch.outputs.cache-hit != 'true' }} + run: | + ${{ steps.vars.outputs.PYTHON_BIN }} scripts/preload_models.py diff --git a/.github/workflows/macos12-miniconda.yml b/.github/workflows/macos12-miniconda.yml deleted file mode 100644 index 18f21277c0..0000000000 --- a/.github/workflows/macos12-miniconda.yml +++ /dev/null @@ -1,80 +0,0 @@ -name: Build -on: - push: - branches: [ main ] - pull_request: - branches: [ main ] -jobs: - build: - strategy: - matrix: - os: [ macos-12 ] - name: Build on ${{ matrix.os }} miniconda - runs-on: ${{ matrix.os }} - steps: - - name: Checkout sources - uses: actions/checkout@v3 - - name: Cache model - id: cache-sd-v1-4 - uses: actions/cache@v3 - env: - cache-name: cache-sd-v1-4 - with: - path: models/ldm/stable-diffusion-v1/model.ckpt - key: ${{ env.cache-name }} - restore-keys: | - ${{ env.cache-name }} - - name: Download Stable Diffusion v1.4 model - if: ${{ steps.cache-sd-v1-4.outputs.cache-hit != 'true' }} - continue-on-error: true - run: | - if [ ! -e models/ldm/stable-diffusion-v1 ]; then - mkdir -p models/ldm/stable-diffusion-v1 - fi - if [ ! 
-e models/ldm/stable-diffusion-v1/model.ckpt ]; then - curl -o models/ldm/stable-diffusion-v1/model.ckpt ${{ secrets.SD_V1_4_URL }} - fi -# Uncomment this when we no longer make changes to environment-mac.yaml -# - name: Cache environment -# id: cache-conda-env-ldm -# uses: actions/cache@v3 -# env: -# cache-name: cache-conda-env-ldm -# with: -# path: ~/.conda/envs/ldm -# key: ${{ env.cache-name }} -# restore-keys: | -# ${{ env.cache-name }} - - name: Install dependencies -# if: ${{ steps.cache-conda-env-ldm.outputs.cache-hit != 'true' }} - run: | - conda env create -f environment-mac.yaml - - name: Cache hugginface and torch models - id: cache-hugginface-torch - uses: actions/cache@v3 - env: - cache-name: cache-hugginface-torch - with: - path: ~/.cache - key: ${{ env.cache-name }} - restore-keys: | - ${{ env.cache-name }} - - name: Download Huggingface and Torch models - if: ${{ steps.cache-hugginface-torch.outputs.cache-hit != 'true' }} - continue-on-error: true - run: | - export PYTHON_BIN=/usr/local/miniconda/envs/ldm/bin/python - $PYTHON_BIN scripts/preload_models.py - - name: Run the tests - run: | - # Note, can't "activate" via automation, and activation is just env vars and path - export PYTHON_BIN=/usr/local/miniconda/envs/ldm/bin/python - export PYTORCH_ENABLE_MPS_FALLBACK=1 - $PYTHON_BIN scripts/preload_models.py - mkdir -p outputs/img-samples - time $PYTHON_BIN scripts/dream.py --from_file tests/prompts.txt outputs/img-samples/err.log > outputs/img-samples/out.log - - name: Archive results - uses: actions/upload-artifact@v3 - with: - name: results - path: outputs/img-samples \ No newline at end of file diff --git a/.github/workflows/test-dream-conda.yml b/.github/workflows/test-dream-conda.yml new file mode 100644 index 0000000000..3bd9b24582 --- /dev/null +++ b/.github/workflows/test-dream-conda.yml @@ -0,0 +1,97 @@ +name: Test Dream with Conda +on: + push: + branches: + - 'main' + - 'development' +jobs: + os_matrix: + strategy: + matrix: + os: [ ubuntu-latest, macos-12 ] + name: Test dream.py on ${{ matrix.os }} with conda + runs-on: ${{ matrix.os }} + steps: + - run: | + echo The PR was merged + - name: Set platform variables + id: vars + run: | + # Note, can't "activate" via github action; specifying the env's python has the same effect + if [ "$RUNNER_OS" = "macOS" ]; then + echo "::set-output name=ENV_FILE::environment-mac.yaml" + echo "::set-output name=PYTHON_BIN::/usr/local/miniconda/envs/ldm/bin/python" + elif [ "$RUNNER_OS" = "Linux" ]; then + echo "::set-output name=ENV_FILE::environment.yaml" + echo "::set-output name=PYTHON_BIN::/usr/share/miniconda/envs/ldm/bin/python" + fi + - name: Checkout sources + uses: actions/checkout@v3 + - name: Use Cached Stable Diffusion v1.4 Model + id: cache-sd-v1-4 + uses: actions/cache@v3 + env: + cache-name: cache-sd-v1-4 + with: + path: models/ldm/stable-diffusion-v1/model.ckpt + key: ${{ env.cache-name }} + restore-keys: | + ${{ env.cache-name }} + - name: Download Stable Diffusion v1.4 Model + if: ${{ steps.cache-sd-v1-4.outputs.cache-hit != 'true' }} + run: | + if [ ! -e models/ldm/stable-diffusion-v1 ]; then + mkdir -p models/ldm/stable-diffusion-v1 + fi + if [ ! 
-e models/ldm/stable-diffusion-v1/model.ckpt ]; then + curl -o models/ldm/stable-diffusion-v1/model.ckpt ${{ secrets.SD_V1_4_URL }} + fi + - name: Use Cached Dependencies + id: cache-conda-env-ldm + uses: actions/cache@v3 + env: + cache-name: cache-conda-env-ldm + with: + path: ~/.conda/envs/ldm + key: ${{ env.cache-name }} + restore-keys: | + ${{ env.cache-name }}-${{ runner.os }}-${{ hashFiles(steps.vars.outputs.ENV_FILE) }} + - name: Install Dependencies + if: ${{ steps.cache-conda-env-ldm.outputs.cache-hit != 'true' }} + run: | + conda env create -f ${{ steps.vars.outputs.ENV_FILE }} + - name: Use Cached Huggingface and Torch models + id: cache-hugginface-torch + uses: actions/cache@v3 + env: + cache-name: cache-hugginface-torch + with: + path: ~/.cache + key: ${{ env.cache-name }} + restore-keys: | + ${{ env.cache-name }}-${{ hashFiles('scripts/preload_models.py') }} + - name: Download Huggingface and Torch models + if: ${{ steps.cache-hugginface-torch.outputs.cache-hit != 'true' }} + run: | + ${{ steps.vars.outputs.PYTHON_BIN }} scripts/preload_models.py +# - name: Run tmate +# uses: mxschmitt/action-tmate@v3 +# timeout-minutes: 30 + - name: Run the tests + run: | + # Note, can't "activate" via github action; specifying the env's python has the same effect + if [ $(uname) = "Darwin" ]; then + export PYTORCH_ENABLE_MPS_FALLBACK=1 + fi + # Utterly hacky, but I don't know how else to do this + if [[ ${{ github.ref }} == 'refs/heads/master' ]]; then + time ${{ steps.vars.outputs.PYTHON_BIN }} scripts/dream.py --from_file tests/preflight_prompts.txt --full_precision + elif [[ ${{ github.ref }} == 'refs/heads/development' ]]; then + time ${{ steps.vars.outputs.PYTHON_BIN }} scripts/dream.py --from_file tests/dev_prompts.txt --full_precision + fi + mkdir -p outputs/img-samples + - name: Archive results + uses: actions/upload-artifact@v3 + with: + name: results + path: outputs/img-samples diff --git a/docs/installation/INSTALL_MAC.md b/docs/installation/INSTALL_MAC.md index 39398c36ac..71535980f5 100644 --- a/docs/installation/INSTALL_MAC.md +++ b/docs/installation/INSTALL_MAC.md @@ -7,10 +7,7 @@ title: macOS - macOS 12.3 Monterey or later - Python - Patience -- Apple Silicon\* - -\*I haven't tested any of this on Intel Macs but I have read that one person got -it to work, so Apple Silicon might not be requried. +- Apple Silicon or Intel Mac Things have moved really fast and so these instructions change often and are often out-of-date. 
One of the problems is that there are so many different ways @@ -59,9 +56,13 @@ First get the weights checkpoint download started - it's big: # install python 3, git, cmake, protobuf: brew install cmake protobuf rust -# install miniconda (M1 arm64 version): - curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh -o Miniconda3-latest-MacOSX-arm64.sh - /bin/bash Miniconda3-latest-MacOSX-arm64.sh +# install miniconda for M1 arm64: +curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh -o Miniconda3-latest-MacOSX-arm64.sh +/bin/bash Miniconda3-latest-MacOSX-arm64.sh + +# OR install miniconda for Intel: +curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -o Miniconda3-latest-MacOSX-x86_64.sh +/bin/bash Miniconda3-latest-MacOSX-x86_64.sh # EITHER WAY, @@ -82,15 +83,22 @@ brew install cmake protobuf rust ln -s "$PATH_TO_CKPT/sd-v1-4.ckpt" models/ldm/stable-diffusion-v1/model.ckpt -# install packages - PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac.yaml - conda activate ldm +# install packages for arm64 +PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac.yaml +conda activate ldm + +# OR install packages for x86_64 +PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-x86_64 conda env create -f environment-mac.yaml +conda activate ldm # only need to do this once python scripts/preload_models.py # run SD! python scripts/dream.py --full_precision # half-precision requires autocast and won't work + +# or run the web interface! +python scripts/dream.py --web ``` The original scripts should work as well. @@ -181,7 +189,12 @@ There are several causes of these errors. - Third, if it says you're missing taming you need to rebuild your virtual environment. -`conda env remove -n ldm conda env create -f environment-mac.yaml` +````bash +conda deactivate + +conda env remove -n ldm +PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac.yaml +``` Fourth, If you have activated the ldm virtual environment and tried rebuilding it, maybe the problem could be that I have something installed that you don't diff --git a/ldm/dream/server.py b/ldm/dream/server.py index cde3957a1f..372d719052 100644 --- a/ldm/dream/server.py +++ b/ldm/dream/server.py @@ -228,7 +228,8 @@ class DreamServer(BaseHTTPRequestHandler): nonlocal step_index if opt.progress_images and step % 5 == 0 and step < opt.steps - 1: image = self.model.sample_to_image(sample) - name = f'{prefix}.{opt.seed}.{step_index}.png' + step_index_padded = str(step_index).rjust(len(str(opt.steps)), '0') + name = f'{prefix}.{opt.seed}.{step_index_padded}.png' metadata = f'{opt.prompt} -S{opt.seed} [intermediate]' path = step_writer.save_image_and_prompt_to_png(image, dream_prompt=metadata, name=name) step_index += 1 diff --git a/ldm/modules/embedding_manager.py b/ldm/modules/embedding_manager.py index b579bcd885..09e6f495ab 100644 --- a/ldm/modules/embedding_manager.py +++ b/ldm/modules/embedding_manager.py @@ -82,7 +82,9 @@ class EmbeddingManager(nn.Module): get_embedding_for_clip_token, embedder.transformer.text_model.embeddings, ) - token_dim = 1280 + # per bug report #572 + #token_dim = 1280 + token_dim = 768 else: # using LDM's BERT encoder self.is_clip = False get_token_for_string = partial( diff --git a/tests/dev_prompts.txt b/tests/dev_prompts.txt new file mode 100644 index 0000000000..9ebca4e9f7 --- /dev/null +++ b/tests/dev_prompts.txt @@ -0,0 +1 @@ +banana sushi -Ak_lms -S42 diff --git a/tests/preflight_prompts.txt 
b/tests/preflight_prompts.txt new file mode 100644 index 0000000000..5c5b8233a1 --- /dev/null +++ b/tests/preflight_prompts.txt @@ -0,0 +1,9 @@ +banana sushi -Ak_lms -S42 +banana sushi -Addim -S42 +banana sushi -Ak_lms -W640 -H480 -S42 +banana sushi -Ak_lms -S42 -G1 -U 2 0.5 +banana sushi -Ak_lms -S42 -v0.2 -n3 +banana sushi -Ak_lms -S42 -V1349749425:0.1,4145759947:0.1 +snake -I outputs/preflight/000006.4145759947.png -S42 +snake -I outputs/preflight/000006.4145759947.png -S42 -W640 -H640 --fit +strawberry sushi -I./image-and-mask.png -S42 -f0.9 -s100 -C15 \ No newline at end of file diff --git a/tests/prompts.txt b/tests/prompts.txt deleted file mode 100644 index 955220a5e6..0000000000 --- a/tests/prompts.txt +++ /dev/null @@ -1 +0,0 @@ -test trending on artstation -s 1 -S 1 From fefcdffb55e1784efdaeb3e26436e4f7fcfb5ea0 Mon Sep 17 00:00:00 2001 From: tildebyte <337875+tildebyte@users.noreply.github.com> Date: Fri, 16 Sep 2022 20:21:19 -0400 Subject: [PATCH 03/11] fix(readme): switch last-commit badge to last DEV commit (#626) - switch badge service to badgen, as I couldn't figure out shields.io Signed-off-by: Ben Alkov Signed-off-by: Ben Alkov --- README.md | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 2dacbc7866..8f88a5c9d2 100644 --- a/README.md +++ b/README.md @@ -5,11 +5,16 @@
- [removed badges: last-commit, stars, issues, pull-requests]
+ [added badges: release, stars, forks]
+ [added badges: CI status on main, CI status on dev, last-dev-commit]
+ [added badges: open-issues, open-prs]
This is a fork of [CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion), the open From 42072fc15c6c70ac33ad7d34d834e55307b27dc1 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Sat, 17 Sep 2022 14:12:35 +1200 Subject: [PATCH 04/11] Bug Fixes --- ldm/dream/args.py | 15 ++++++++++++--- scripts/dream.py | 10 +++++----- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/ldm/dream/args.py b/ldm/dream/args.py index 24056d340e..04cea509a7 100644 --- a/ldm/dream/args.py +++ b/ldm/dream/args.py @@ -105,6 +105,7 @@ class Args(object): try: elements = shlex.split(command) except ValueError: + import sys, traceback print(traceback.format_exc(), file=sys.stderr) return switches = [''] @@ -266,6 +267,17 @@ class Args(object): default='stable-diffusion-1.4', help='Indicates which diffusion model to load. (currently "stable-diffusion-1.4" (default) or "laion400m")', ) + model_group.add_argument( + '--sampler', + '-A', + '-m', + dest='sampler_name', + type=str, + choices=SAMPLER_CHOICES, + metavar='SAMPLER_NAME', + help=f'Switch to a different sampler. Supported samplers: {", ".join(SAMPLER_CHOICES)}', + default='k_lms', + ) model_group.add_argument( '-F', '--full_precision', @@ -386,14 +398,12 @@ class Args(object): '--width', type=int, help='Image width, multiple of 64', - default=512 ) render_group.add_argument( '-H', '--height', type=int, help='Image height, multiple of 64', - default=512, ) render_group.add_argument( '-C', @@ -429,7 +439,6 @@ class Args(object): choices=SAMPLER_CHOICES, metavar='SAMPLER_NAME', help=f'Switch to a different sampler. Supported samplers: {", ".join(SAMPLER_CHOICES)}', - default='k_lms', ) render_group.add_argument( '-t', diff --git a/scripts/dream.py b/scripts/dream.py index 35de70d650..b8a5e5a84b 100644 --- a/scripts/dream.py +++ b/scripts/dream.py @@ -123,7 +123,7 @@ def main_loop(gen, opt, infile): if command.startswith(('#', '//')): continue - if command.startswith('q '): + if len(command.strip()) == 1 and command.startswith('q'): done = True break @@ -138,7 +138,7 @@ def main_loop(gen, opt, infile): parser.print_help() continue if len(opt.prompt) == 0: - print('Try again with a prompt!') + print('\nTry again with a prompt!') continue # retrieve previous value! @@ -191,14 +191,14 @@ def main_loop(gen, opt, infile): if not os.path.exists(opt.outdir): os.makedirs(opt.outdir) current_outdir = opt.outdir - elif prompt_as_dir: + elif opt.prompt_as_dir: # sanitize the prompt to a valid folder name subdir = path_filter.sub('_', opt.prompt)[:name_max].rstrip(' .') # truncate path to maximum allowed length # 27 is the length of '######.##########.##.png', plus two separators and a NUL subdir = subdir[:(path_max - 27 - len(os.path.abspath(opt.outdir)))] - current_outdir = os.path.join(outdir, subdir) + current_outdir = os.path.join(opt.outdir, subdir) print('Writing files to directory: "' + current_outdir + '"') @@ -206,7 +206,7 @@ def main_loop(gen, opt, infile): if not os.path.exists(current_outdir): os.makedirs(current_outdir) else: - current_outdir = outdir + current_outdir = opt.outdir # Here is where the images are actually generated! 
last_results = [] From b89aadb3c957c2b87b58334a16734f0fbba2f293 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 17 Sep 2022 00:57:35 -0400 Subject: [PATCH 05/11] fix crash on second prompt #636 --- ldm/dream/args.py | 21 +++++++++++++-------- ldm/dream/pngwriter.py | 1 - scripts/dream.py | 5 +---- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/ldm/dream/args.py b/ldm/dream/args.py index 04cea509a7..e9b70a7199 100644 --- a/ldm/dream/args.py +++ b/ldm/dream/args.py @@ -190,10 +190,10 @@ class Args(object): pass if cmd_switches and arg_switches and name=='__dict__': - a = arg_switches.__dict__ - a.update(cmd_switches.__dict__) - return a - + return self._merge_dict( + arg_switches.__dict__, + cmd_switches.__dict__, + ) try: return object.__getattribute__(self,name) except AttributeError: @@ -218,10 +218,7 @@ class Args(object): # funny because of their push/pull relationship. This is how to handle it. if name=='grid': return not cmd_switches.individual and value_arg # arg supersedes cmd - if value_cmd is not None: - return value_cmd - else: - return value_arg + return value_cmd if value_cmd is not None else value_arg def __setattr__(self,name,value): if name.startswith('_'): @@ -229,6 +226,14 @@ class Args(object): else: self._cmd_switches.__dict__[name] = value + def _merge_dict(self,dict1,dict2): + new_dict = {} + for k in set(list(dict1.keys())+list(dict2.keys())): + value1 = dict1.get(k,None) + value2 = dict2.get(k,None) + new_dict[k] = value2 if value2 is not None else value1 + return new_dict + def _create_arg_parser(self): ''' This defines all the arguments used on the command line when you launch diff --git a/ldm/dream/pngwriter.py b/ldm/dream/pngwriter.py index 9a2a8bc816..5cda259357 100644 --- a/ldm/dream/pngwriter.py +++ b/ldm/dream/pngwriter.py @@ -34,7 +34,6 @@ class PngWriter: # saves image named _image_ to outdir/name, writing metadata from prompt # returns full path of output def save_image_and_prompt_to_png(self, image, dream_prompt, name, metadata=None): - print(f'self.outdir={self.outdir}, name={name}') path = os.path.join(self.outdir, name) info = PngImagePlugin.PngInfo() info.add_text('Dream', dream_prompt) diff --git a/scripts/dream.py b/scripts/dream.py index b8a5e5a84b..ad10551bbc 100644 --- a/scripts/dream.py +++ b/scripts/dream.py @@ -132,10 +132,7 @@ def main_loop(gen, opt, infile): ): # in case a stored prompt still contains the !dream command command.replace('!dream','',1) - try: - parser = opt.parse_cmd(command) - except SystemExit: - parser.print_help() + if opt.parse_cmd(command) is None: continue if len(opt.prompt) == 0: print('\nTry again with a prompt!') From 5b692f47205780cfc2019992c78ce31b509f4578 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 17 Sep 2022 01:14:00 -0400 Subject: [PATCH 06/11] include width and height in png dream prompt --- scripts/dream.py | 7 +++++++ 1 file changed, 7 insertions(+) mode change 100644 => 100755 scripts/dream.py diff --git a/scripts/dream.py b/scripts/dream.py old mode 100644 new mode 100755 index ad10551bbc..f147008d78 --- a/scripts/dream.py +++ b/scripts/dream.py @@ -100,6 +100,7 @@ def main_loop(gen, opt, infile): done = False path_filter = re.compile(r'[<>:"/\\|?*]') last_results = list() + model_config = OmegaConf.load(opt.conf)[opt.model] # os.pathconf is not available on Windows if hasattr(os, 'pathconf'): @@ -138,6 +139,12 @@ def main_loop(gen, opt, infile): print('\nTry again with a prompt!') continue + # width and height are set by model if not specified + if not opt.width: + 
opt.width = model_config.width + if not opt.height: + opt.height = model_config.height + # retrieve previous value! if opt.init_img is not None and re.match('^-\\d+$', opt.init_img): try: From 31daf1f0d794cac1eba526d2e4c17fc8ea1f4867 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 17 Sep 2022 01:32:31 -0400 Subject: [PATCH 07/11] preload_models.py now downloads gfpgan model --- scripts/preload_models.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/scripts/preload_models.py b/scripts/preload_models.py index ba7b242ece..794a552134 100755 --- a/scripts/preload_models.py +++ b/scripts/preload_models.py @@ -10,6 +10,7 @@ import sys import transformers import os import warnings +import urllib.request transformers.logging.set_verbosity_error() @@ -81,6 +82,16 @@ if gfpgan: print('...success') except Exception: import traceback + print('Error loading ESRGAN:') + print(traceback.format_exc()) + try: + import urllib.request + model_path = 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth' + model_dest = 'src/gfpgan/experiments/pretrained_models/GFPGANv1.3.pth' + print('downloading gfpgan model file...') + urllib.request.urlretrieve(model_path,model_dest) + except Exception: + import traceback print('Error loading GFPGAN:') print(traceback.format_exc()) From 89540f293bfab3295ede7a0b73fa277f2a463a36 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 17 Sep 2022 02:01:55 -0400 Subject: [PATCH 08/11] Restored static files needed for new flask/react web server WARNING: old web server will no longer display correct interface. --- static/dream_web/index.css | 61 ++++-- static/dream_web/index.html | 274 +++++++++++++++---------- static/dream_web/index.js | 399 ++++++++++++++++++++++++++---------- 3 files changed, 497 insertions(+), 237 deletions(-) diff --git a/static/dream_web/index.css b/static/dream_web/index.css index 51f0f267c3..25a0994a3d 100644 --- a/static/dream_web/index.css +++ b/static/dream_web/index.css @@ -1,3 +1,8 @@ +:root { + --fields-dark:#DCDCDC; + --fields-light:#F5F5F5; +} + * { font-family: 'Arial'; font-size: 100%; @@ -18,15 +23,26 @@ fieldset { border: none; line-height: 2.2em; } +fieldset > legend { + width: auto; + margin-left: 0; + margin-right: auto; + font-weight:bold; +} select, input { margin-right: 10px; padding: 2px; } +input:disabled { + cursor:auto; +} input[type=submit] { + cursor: pointer; background-color: #666; color: white; } input[type=checkbox] { + cursor: pointer; margin-right: 0px; width: 20px; height: 20px; @@ -87,11 +103,11 @@ header h1 { } #results img { border-radius: 5px; - object-fit: cover; + object-fit: contain; + background-color: var(--fields-dark); } #fieldset-config { line-height:2em; - background-color: #F0F0F0; } input[type="number"] { width: 60px; @@ -118,35 +134,46 @@ label { #progress-image { width: 30vh; height: 30vh; + object-fit: contain; + background-color: var(--fields-dark); } #cancel-button { cursor: pointer; color: red; } -#basic-parameters { - background-color: #EEEEEE; -} #txt2img { - background-color: #DCDCDC; + background-color: var(--fields-dark); } #variations { - background-color: #EEEEEE; + background-color: var(--fields-light); +} +#initimg { + background-color: var(--fields-dark); } #img2img { - background-color: #DCDCDC; + background-color: var(--fields-light); } -#gfpgan { - background-color: #EEEEEE; +#initimg > :not(legend) { + background-color: var(--fields-light); + margin: .5em; +} + +#postprocess, #initimg { + display:flex; + flex-wrap:wrap; + padding: 0; + 
margin-top: 1em; + background-color: var(--fields-dark); +} +#postprocess > fieldset, #initimg > * { + flex-grow: 1; +} +#postprocess > fieldset { + background-color: var(--fields-dark); } #progress-section { - background-color: #F5F5F5; -} -.section-header { - text-align: left; - font-weight: bold; - padding: 0 0 0 0; + background-color: var(--fields-light); } #no-results-message:not(:only-child) { display: none; } - diff --git a/static/dream_web/index.html b/static/dream_web/index.html index 1e194c0205..9dbd213669 100644 --- a/static/dream_web/index.html +++ b/static/dream_web/index.html @@ -1,102 +1,152 @@ - - Stable Diffusion Dream Server - - - - - - - - -
-

[markup for the static/dream_web/index.html rewrite is not recoverable from extraction; the only visible strings that survive are "Stable Diffusion Dream Server", "For news and support for this web service, visit our GitHub site", the option-group labels "Basic options", "Image-to-image options" and "Post-processing options", "Postprocessing...1/3", and "No results..."]
+
+ + + diff --git a/static/dream_web/index.js b/static/dream_web/index.js index ac68034920..5de690297d 100644 --- a/static/dream_web/index.js +++ b/static/dream_web/index.js @@ -1,3 +1,109 @@ +const socket = io(); + +var priorResultsLoadState = { + page: 0, + pages: 1, + per_page: 10, + total: 20, + offset: 0, // number of items generated since last load + loading: false, + initialized: false +}; + +function loadPriorResults() { + // Fix next page by offset + let offsetPages = priorResultsLoadState.offset / priorResultsLoadState.per_page; + priorResultsLoadState.page += offsetPages; + priorResultsLoadState.pages += offsetPages; + priorResultsLoadState.total += priorResultsLoadState.offset; + priorResultsLoadState.offset = 0; + + if (priorResultsLoadState.loading) { + return; + } + + if (priorResultsLoadState.page >= priorResultsLoadState.pages) { + return; // Nothing more to load + } + + // Load + priorResultsLoadState.loading = true + let url = new URL('/api/images', document.baseURI); + url.searchParams.append('page', priorResultsLoadState.initialized ? priorResultsLoadState.page + 1 : priorResultsLoadState.page); + url.searchParams.append('per_page', priorResultsLoadState.per_page); + fetch(url.href, { + method: 'GET', + headers: new Headers({'content-type': 'application/json'}) + }) + .then(response => response.json()) + .then(data => { + priorResultsLoadState.page = data.page; + priorResultsLoadState.pages = data.pages; + priorResultsLoadState.per_page = data.per_page; + priorResultsLoadState.total = data.total; + + data.items.forEach(function(dreamId, index) { + let src = 'api/images/' + dreamId; + fetch('/api/images/' + dreamId + '/metadata', { + method: 'GET', + headers: new Headers({'content-type': 'application/json'}) + }) + .then(response => response.json()) + .then(metadata => { + let seed = metadata.seed || 0; // TODO: Parse old metadata + appendOutput(src, seed, metadata, true); + }); + }); + + // Load until page is full + if (!priorResultsLoadState.initialized) { + if (document.body.scrollHeight <= window.innerHeight) { + loadPriorResults(); + } + } + }) + .finally(() => { + priorResultsLoadState.loading = false; + priorResultsLoadState.initialized = true; + }); +} + +function resetForm() { + var form = document.getElementById('generate-form'); + form.querySelector('fieldset').removeAttribute('disabled'); +} + +function initProgress(totalSteps, showProgressImages) { + // TODO: Progress could theoretically come from multiple jobs at the same time (in the future) + let progressSectionEle = document.querySelector('#progress-section'); + progressSectionEle.style.display = 'initial'; + let progressEle = document.querySelector('#progress-bar'); + progressEle.setAttribute('max', totalSteps); + + let progressImageEle = document.querySelector('#progress-image'); + progressImageEle.src = BLANK_IMAGE_URL; + progressImageEle.style.display = showProgressImages ? 
'initial': 'none'; +} + +function setProgress(step, totalSteps, src) { + let progressEle = document.querySelector('#progress-bar'); + progressEle.setAttribute('value', step); + + if (src) { + let progressImageEle = document.querySelector('#progress-image'); + progressImageEle.src = src; + } +} + +function resetProgress(hide = true) { + if (hide) { + let progressSectionEle = document.querySelector('#progress-section'); + progressSectionEle.style.display = 'none'; + } + let progressEle = document.querySelector('#progress-bar'); + progressEle.setAttribute('value', 0); +} + function toBase64(file) { return new Promise((resolve, reject) => { const r = new FileReader(); @@ -7,17 +113,41 @@ function toBase64(file) { }); } -function appendOutput(src, seed, config) { - let outputNode = document.createElement("figure"); - - let variations = config.with_variations; - if (config.variation_amount > 0) { - variations = (variations ? variations + ',' : '') + seed + ':' + config.variation_amount; +function ondragdream(event) { + let dream = event.target.dataset.dream; + event.dataTransfer.setData("dream", dream); +} + +function seedClick(event) { + // Get element + var image = event.target.closest('figure').querySelector('img'); + var dream = JSON.parse(decodeURIComponent(image.dataset.dream)); + + let form = document.querySelector("#generate-form"); + for (const [k, v] of new FormData(form)) { + if (k == 'initimg') { continue; } + let formElem = form.querySelector(`*[name=${k}]`); + formElem.value = dream[k] !== undefined ? dream[k] : formElem.defaultValue; } - let baseseed = (config.with_variations || config.variation_amount > 0) ? config.seed : seed; - let altText = baseseed + ' | ' + (variations ? variations + ' | ' : '') + config.prompt; + + document.querySelector("#seed").value = dream.seed; + document.querySelector('#iterations').value = 1; // Reset to 1 iteration since we clicked a single image (not a full job) + + // NOTE: leaving this manual for the user for now - it was very confusing with this behavior + // document.querySelector("#with_variations").value = variations || ''; + // if (document.querySelector("#variation_amount").value <= 0) { + // document.querySelector("#variation_amount").value = 0.2; + // } + + saveFields(document.querySelector("#generate-form")); +} + +function appendOutput(src, seed, config, toEnd=false) { + let outputNode = document.createElement("figure"); + let altText = seed.toString() + " | " + config.prompt; // img needs width and height for lazy loading to work + // TODO: store the full config in a data attribute on the image? const figureContents = ` + height="256" + draggable="true" + ondragstart="ondragdream(event, this)" + data-dream="${encodeURIComponent(JSON.stringify(config))}" + data-dreamId="${encodeURIComponent(config.dreamId)}"> -
${seed}
+
${seed}
`; outputNode.innerHTML = figureContents; - let figcaption = outputNode.querySelector('figcaption'); - // Reload image config - figcaption.addEventListener('click', () => { - let form = document.querySelector("#generate-form"); - for (const [k, v] of new FormData(form)) { - if (k == 'initimg') { continue; } - form.querySelector(`*[name=${k}]`).value = config[k]; - } - - document.querySelector("#seed").value = baseseed; - document.querySelector("#with_variations").value = variations || ''; - if (document.querySelector("#variation_amount").value <= 0) { - document.querySelector("#variation_amount").value = 0.2; - } - - saveFields(document.querySelector("#generate-form")); - }); - - document.querySelector("#results").prepend(outputNode); + if (toEnd) { + document.querySelector("#results").append(outputNode); + } else { + document.querySelector("#results").prepend(outputNode); + } + document.querySelector("#no-results-message")?.remove(); } function saveFields(form) { @@ -79,93 +200,109 @@ function clearFields(form) { const BLANK_IMAGE_URL = 'data:image/svg+xml,'; async function generateSubmit(form) { - const prompt = document.querySelector("#prompt").value; - // Convert file data to base64 + // TODO: Should probably uplaod files with formdata or something, and store them in the backend? let formData = Object.fromEntries(new FormData(form)); + if (!formData.enable_generate && !formData.enable_init_image) { + gen_label = document.querySelector("label[for=enable_generate]").innerHTML; + initimg_label = document.querySelector("label[for=enable_init_image]").innerHTML; + alert(`Error: one of "${gen_label}" or "${initimg_label}" must be set`); + } + + formData.initimg_name = formData.initimg.name formData.initimg = formData.initimg.name !== '' ? await toBase64(formData.initimg) : null; - let strength = formData.strength; - let totalSteps = formData.initimg ? Math.floor(strength * formData.steps) : formData.steps; - - let progressSectionEle = document.querySelector('#progress-section'); - progressSectionEle.style.display = 'initial'; - let progressEle = document.querySelector('#progress-bar'); - progressEle.setAttribute('max', totalSteps); - let progressImageEle = document.querySelector('#progress-image'); - progressImageEle.src = BLANK_IMAGE_URL; - - progressImageEle.style.display = {}.hasOwnProperty.call(formData, 'progress_images') ? 
'initial': 'none'; - - // Post as JSON, using Fetch streaming to get results - fetch(form.action, { - method: form.method, - body: JSON.stringify(formData), - }).then(async (response) => { - const reader = response.body.getReader(); - - let noOutputs = true; - while (true) { - let {value, done} = await reader.read(); - value = new TextDecoder().decode(value); - if (done) { - progressSectionEle.style.display = 'none'; - break; - } - - for (let event of value.split('\n').filter(e => e !== '')) { - const data = JSON.parse(event); - - if (data.event === 'result') { - noOutputs = false; - appendOutput(data.url, data.seed, data.config); - progressEle.setAttribute('value', 0); - progressEle.setAttribute('max', totalSteps); - } else if (data.event === 'upscaling-started') { - document.getElementById("processing_cnt").textContent=data.processed_file_cnt; - document.getElementById("scaling-inprocess-message").style.display = "block"; - } else if (data.event === 'upscaling-done') { - document.getElementById("scaling-inprocess-message").style.display = "none"; - } else if (data.event === 'step') { - progressEle.setAttribute('value', data.step); - if (data.url) { - progressImageEle.src = data.url; - } - } else if (data.event === 'canceled') { - // avoid alerting as if this were an error case - noOutputs = false; - } - } - } - - // Re-enable form, remove no-results-message - form.querySelector('fieldset').removeAttribute('disabled'); - document.querySelector("#prompt").value = prompt; - document.querySelector('progress').setAttribute('value', '0'); - - if (noOutputs) { - alert("Error occurred while generating."); - } + // Evaluate all checkboxes + let checkboxes = form.querySelectorAll('input[type=checkbox]'); + checkboxes.forEach(function (checkbox) { + if (checkbox.checked) { + formData[checkbox.name] = 'true'; + } + }); + + let strength = formData.strength; + let totalSteps = formData.initimg ? 
Math.floor(strength * formData.steps) : formData.steps; + let showProgressImages = formData.progress_images; + + // Set enabling flags + + + // Initialize the progress bar + initProgress(totalSteps, showProgressImages); + + // POST, use response to listen for events + fetch(form.action, { + method: form.method, + headers: new Headers({'content-type': 'application/json'}), + body: JSON.stringify(formData), + }) + .then(response => response.json()) + .then(data => { + var jobId = data.jobId; + socket.emit('join_room', { 'room': jobId }); }); - // Disable form while generating form.querySelector('fieldset').setAttribute('disabled',''); - document.querySelector("#prompt").value = `Generating: "${prompt}"`; } -async function fetchRunLog() { - try { - let response = await fetch('/run_log.json') - const data = await response.json(); - for(let item of data.run_log) { - appendOutput(item.url, item.seed, item); - } - } catch (e) { - console.error(e); - } +function fieldSetEnableChecked(event) { + cb = event.target; + fields = cb.closest('fieldset'); + fields.disabled = !cb.checked; } +// Socket listeners +socket.on('job_started', (data) => {}) + +socket.on('dream_result', (data) => { + var jobId = data.jobId; + var dreamId = data.dreamId; + var dreamRequest = data.dreamRequest; + var src = 'api/images/' + dreamId; + + priorResultsLoadState.offset += 1; + appendOutput(src, dreamRequest.seed, dreamRequest); + + resetProgress(false); +}) + +socket.on('dream_progress', (data) => { + // TODO: it'd be nice if we could get a seed reported here, but the generator would need to be updated + var step = data.step; + var totalSteps = data.totalSteps; + var jobId = data.jobId; + var dreamId = data.dreamId; + + var progressType = data.progressType + if (progressType === 'GENERATION') { + var src = data.hasProgressImage ? 
+ 'api/intermediates/' + dreamId + '/' + step + : null; + setProgress(step, totalSteps, src); + } else if (progressType === 'UPSCALING_STARTED') { + // step and totalSteps are used for upscale count on this message + document.getElementById("processing_cnt").textContent = step; + document.getElementById("processing_total").textContent = totalSteps; + document.getElementById("scaling-inprocess-message").style.display = "block"; + } else if (progressType == 'UPSCALING_DONE') { + document.getElementById("scaling-inprocess-message").style.display = "none"; + } +}) + +socket.on('job_canceled', (data) => { + resetForm(); + resetProgress(); +}) + +socket.on('job_done', (data) => { + jobId = data.jobId + socket.emit('leave_room', { 'room': jobId }); + + resetForm(); + resetProgress(); +}) + window.onload = async () => { document.querySelector("#prompt").addEventListener("keydown", (e) => { if (e.key === "Enter" && !e.shiftKey) { @@ -183,7 +320,7 @@ window.onload = async () => { saveFields(e.target.form); }); document.querySelector("#reset-seed").addEventListener('click', (e) => { - document.querySelector("#seed").value = -1; + document.querySelector("#seed").value = 0; saveFields(e.target.form); }); document.querySelector("#reset-all").addEventListener('click', (e) => { @@ -195,13 +332,13 @@ window.onload = async () => { loadFields(document.querySelector("#generate-form")); document.querySelector('#cancel-button').addEventListener('click', () => { - fetch('/cancel').catch(e => { + fetch('/api/cancel').catch(e => { console.error(e); }); }); document.documentElement.addEventListener('keydown', (e) => { if (e.key === "Escape") - fetch('/cancel').catch(err => { + fetch('/api/cancel').catch(err => { console.error(err); }); }); @@ -209,5 +346,51 @@ window.onload = async () => { if (!config.gfpgan_model_exists) { document.querySelector("#gfpgan").style.display = 'none'; } - await fetchRunLog() + + window.addEventListener("scroll", () => { + if ((window.innerHeight + window.pageYOffset) >= document.body.offsetHeight) { + loadPriorResults(); + } + }); + + + + // Enable/disable forms by checkboxes + document.querySelectorAll("legend > input[type=checkbox]").forEach(function(cb) { + cb.addEventListener('change', fieldSetEnableChecked); + fieldSetEnableChecked({ target: cb}) + }); + + + // Load some of the previous results + loadPriorResults(); + + // Image drop/upload WIP + /* + let drop = document.getElementById('dropper'); + function ondrop(event) { + let dreamData = event.dataTransfer.getData('dream'); + if (dreamData) { + var dream = JSON.parse(decodeURIComponent(dreamData)); + alert(dream.dreamId); + } + }; + + function ondragenter(event) { + event.preventDefault(); + }; + + function ondragover(event) { + event.preventDefault(); + }; + + function ondragleave(event) { + + } + + drop.addEventListener('drop', ondrop); + drop.addEventListener('dragenter', ondragenter); + drop.addEventListener('dragover', ondragover); + drop.addEventListener('dragleave', ondragleave); + */ }; From b3e026aa4edce564d49c31fe301ccfae8b335b4a Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 17 Sep 2022 02:18:52 -0400 Subject: [PATCH 09/11] point legacy web server at legacy static files --- ldm/dream/server.py | 8 +- static/legacy_web/favicon.ico | Bin 0 -> 1150 bytes static/legacy_web/index.css | 143 +++++++++++++++++++++++ static/legacy_web/index.html | 126 ++++++++++++++++++++ static/legacy_web/index.js | 213 ++++++++++++++++++++++++++++++++++ 5 files changed, 486 insertions(+), 4 deletions(-) create mode 100644 
static/legacy_web/favicon.ico create mode 100644 static/legacy_web/index.css create mode 100644 static/legacy_web/index.html create mode 100644 static/legacy_web/index.js diff --git a/ldm/dream/server.py b/ldm/dream/server.py index 372d719052..9147a3180a 100644 --- a/ldm/dream/server.py +++ b/ldm/dream/server.py @@ -76,7 +76,7 @@ class DreamServer(BaseHTTPRequestHandler): self.send_response(200) self.send_header("Content-type", "text/html") self.end_headers() - with open("./static/dream_web/index.html", "rb") as content: + with open("./static/legacy_web/index.html", "rb") as content: self.wfile.write(content.read()) elif self.path == "/config.js": # unfortunately this import can't be at the top level, since that would cause a circular import @@ -94,7 +94,7 @@ class DreamServer(BaseHTTPRequestHandler): self.end_headers() output = [] - log_file = os.path.join(self.outdir, "dream_web_log.txt") + log_file = os.path.join(self.outdir, "legacy_web_log.txt") if os.path.exists(log_file): with open(log_file, "r") as log: for line in log: @@ -114,7 +114,7 @@ class DreamServer(BaseHTTPRequestHandler): else: path_dir = os.path.dirname(self.path) out_dir = os.path.realpath(self.outdir.rstrip('/')) - if self.path.startswith('/static/dream_web/'): + if self.path.startswith('/static/legacy_web/'): path = '.' + self.path elif out_dir.replace('\\', '/').endswith(path_dir): file = os.path.basename(self.path) @@ -188,7 +188,7 @@ class DreamServer(BaseHTTPRequestHandler): config['seed'] = seed # Append post_data to log, but only once! if not upscaled: - with open(os.path.join(self.outdir, "dream_web_log.txt"), "a") as log: + with open(os.path.join(self.outdir, "legacy_web_log.txt"), "a") as log: log.write(f"{path}: {json.dumps(config)}\n") self.wfile.write(bytes(json.dumps( diff --git a/static/legacy_web/favicon.ico b/static/legacy_web/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..51eb844a6a4a9d4b13e17e38b0fc915e7e97d4b5 GIT binary patch literal 1150 zcmaiy%TE(g6vi*n1a-yAr5H_2eSt+l!2}h8?$p@n=nPJTglL%pit>^TL`+1D5hx&N z)!<{Tc1e&lvO-)*Ow^TsgK$#zJKYFEA;2&@TN?6A5C9Q()1;lGF^Sd zF~GSouqjvv->jVh^vZ3gw#sUXZQHSqR>WSmwCOtUf;BK6W$k#wMKX$aiq1TKiY)i0 zVAh_I80S)!qiamC2k7>K9QPINuKnap%uv%}j+#E^Jur4AXDJpbkvT6Ctz07yN&)Z7 znrGHFe)vUp?-<1^k5RnhDB0a3h^>+{H77oj<%hM0acGw^T{k?>wWp=8-IJ2<;2zkW z55$XEACugh&R(wZ1^nba=DC(TD08@HP|IVZ?1<#7_S=$s)|_Dd@;ZI;mZvYT`CA{Y z_Vq(y{pYvZf8ANnKfH$f+a32rZ=N(I_xgGd_x}n~fRYte5_cZWQRBiY+1KuqaiB`D zuiiy$g`D(znbUIcklw#ZXiGqz&xFs + + Stable Diffusion Dream Server + + + + + + + + +
+

[markup for the new static/legacy_web/index.html is not recoverable from extraction; the only visible strings that survive are "Stable Diffusion Dream Server", "For news and support for this web service, visit our GitHub site", "Postprocessing...1/3", and "No results..."]
+
+ + diff --git a/static/legacy_web/index.js b/static/legacy_web/index.js new file mode 100644 index 0000000000..ac68034920 --- /dev/null +++ b/static/legacy_web/index.js @@ -0,0 +1,213 @@ +function toBase64(file) { + return new Promise((resolve, reject) => { + const r = new FileReader(); + r.readAsDataURL(file); + r.onload = () => resolve(r.result); + r.onerror = (error) => reject(error); + }); +} + +function appendOutput(src, seed, config) { + let outputNode = document.createElement("figure"); + + let variations = config.with_variations; + if (config.variation_amount > 0) { + variations = (variations ? variations + ',' : '') + seed + ':' + config.variation_amount; + } + let baseseed = (config.with_variations || config.variation_amount > 0) ? config.seed : seed; + let altText = baseseed + ' | ' + (variations ? variations + ' | ' : '') + config.prompt; + + // img needs width and height for lazy loading to work + const figureContents = ` + + ${altText} + +
${seed}
+ `; + + outputNode.innerHTML = figureContents; + let figcaption = outputNode.querySelector('figcaption'); + + // Reload image config + figcaption.addEventListener('click', () => { + let form = document.querySelector("#generate-form"); + for (const [k, v] of new FormData(form)) { + if (k == 'initimg') { continue; } + form.querySelector(`*[name=${k}]`).value = config[k]; + } + + document.querySelector("#seed").value = baseseed; + document.querySelector("#with_variations").value = variations || ''; + if (document.querySelector("#variation_amount").value <= 0) { + document.querySelector("#variation_amount").value = 0.2; + } + + saveFields(document.querySelector("#generate-form")); + }); + + document.querySelector("#results").prepend(outputNode); +} + +function saveFields(form) { + for (const [k, v] of new FormData(form)) { + if (typeof v !== 'object') { // Don't save 'file' type + localStorage.setItem(k, v); + } + } +} + +function loadFields(form) { + for (const [k, v] of new FormData(form)) { + const item = localStorage.getItem(k); + if (item != null) { + form.querySelector(`*[name=${k}]`).value = item; + } + } +} + +function clearFields(form) { + localStorage.clear(); + let prompt = form.prompt.value; + form.reset(); + form.prompt.value = prompt; +} + +const BLANK_IMAGE_URL = 'data:image/svg+xml,'; +async function generateSubmit(form) { + const prompt = document.querySelector("#prompt").value; + + // Convert file data to base64 + let formData = Object.fromEntries(new FormData(form)); + formData.initimg_name = formData.initimg.name + formData.initimg = formData.initimg.name !== '' ? await toBase64(formData.initimg) : null; + + let strength = formData.strength; + let totalSteps = formData.initimg ? Math.floor(strength * formData.steps) : formData.steps; + + let progressSectionEle = document.querySelector('#progress-section'); + progressSectionEle.style.display = 'initial'; + let progressEle = document.querySelector('#progress-bar'); + progressEle.setAttribute('max', totalSteps); + let progressImageEle = document.querySelector('#progress-image'); + progressImageEle.src = BLANK_IMAGE_URL; + + progressImageEle.style.display = {}.hasOwnProperty.call(formData, 'progress_images') ? 
'initial': 'none'; + + // Post as JSON, using Fetch streaming to get results + fetch(form.action, { + method: form.method, + body: JSON.stringify(formData), + }).then(async (response) => { + const reader = response.body.getReader(); + + let noOutputs = true; + while (true) { + let {value, done} = await reader.read(); + value = new TextDecoder().decode(value); + if (done) { + progressSectionEle.style.display = 'none'; + break; + } + + for (let event of value.split('\n').filter(e => e !== '')) { + const data = JSON.parse(event); + + if (data.event === 'result') { + noOutputs = false; + appendOutput(data.url, data.seed, data.config); + progressEle.setAttribute('value', 0); + progressEle.setAttribute('max', totalSteps); + } else if (data.event === 'upscaling-started') { + document.getElementById("processing_cnt").textContent=data.processed_file_cnt; + document.getElementById("scaling-inprocess-message").style.display = "block"; + } else if (data.event === 'upscaling-done') { + document.getElementById("scaling-inprocess-message").style.display = "none"; + } else if (data.event === 'step') { + progressEle.setAttribute('value', data.step); + if (data.url) { + progressImageEle.src = data.url; + } + } else if (data.event === 'canceled') { + // avoid alerting as if this were an error case + noOutputs = false; + } + } + } + + // Re-enable form, remove no-results-message + form.querySelector('fieldset').removeAttribute('disabled'); + document.querySelector("#prompt").value = prompt; + document.querySelector('progress').setAttribute('value', '0'); + + if (noOutputs) { + alert("Error occurred while generating."); + } + }); + + // Disable form while generating + form.querySelector('fieldset').setAttribute('disabled',''); + document.querySelector("#prompt").value = `Generating: "${prompt}"`; +} + +async function fetchRunLog() { + try { + let response = await fetch('/run_log.json') + const data = await response.json(); + for(let item of data.run_log) { + appendOutput(item.url, item.seed, item); + } + } catch (e) { + console.error(e); + } +} + +window.onload = async () => { + document.querySelector("#prompt").addEventListener("keydown", (e) => { + if (e.key === "Enter" && !e.shiftKey) { + const form = e.target.form; + generateSubmit(form); + } + }); + document.querySelector("#generate-form").addEventListener('submit', (e) => { + e.preventDefault(); + const form = e.target; + + generateSubmit(form); + }); + document.querySelector("#generate-form").addEventListener('change', (e) => { + saveFields(e.target.form); + }); + document.querySelector("#reset-seed").addEventListener('click', (e) => { + document.querySelector("#seed").value = -1; + saveFields(e.target.form); + }); + document.querySelector("#reset-all").addEventListener('click', (e) => { + clearFields(e.target.form); + }); + document.querySelector("#remove-image").addEventListener('click', (e) => { + initimg.value=null; + }); + loadFields(document.querySelector("#generate-form")); + + document.querySelector('#cancel-button').addEventListener('click', () => { + fetch('/cancel').catch(e => { + console.error(e); + }); + }); + document.documentElement.addEventListener('keydown', (e) => { + if (e.key === "Escape") + fetch('/cancel').catch(err => { + console.error(err); + }); + }); + + if (!config.gfpgan_model_exists) { + document.querySelector("#gfpgan").style.display = 'none'; + } + await fetchRunLog() +}; From b5ed668effc27b5b84b0c345850ee67119133040 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 17 Sep 2022 02:44:07 -0400 Subject: [PATCH 10/11] 
small legacy web appearance tweaks --- static/legacy_web/index.css | 15 ++++++++++++--- static/legacy_web/index.html | 27 +++++++++++++++------------ 2 files changed, 27 insertions(+), 15 deletions(-) diff --git a/static/legacy_web/index.css b/static/legacy_web/index.css index e95c818e19..51f0f267c3 100644 --- a/static/legacy_web/index.css +++ b/static/legacy_web/index.css @@ -91,6 +91,7 @@ header h1 { } #fieldset-config { line-height:2em; + background-color: #F0F0F0; } input[type="number"] { width: 60px; @@ -122,6 +123,9 @@ label { cursor: pointer; color: red; } +#basic-parameters { + background-color: #EEEEEE; +} #txt2img { background-color: #DCDCDC; } @@ -129,15 +133,20 @@ label { background-color: #EEEEEE; } #img2img { - background-color: #F5F5F5; + background-color: #DCDCDC; } #gfpgan { - background-color: #DCDCDC; + background-color: #EEEEEE; } #progress-section { background-color: #F5F5F5; } - +.section-header { + text-align: left; + font-weight: bold; + padding: 0 0 0 0; +} #no-results-message:not(:only-child) { display: none; } + diff --git a/static/legacy_web/index.html b/static/legacy_web/index.html index 3d845f74ef..5ce8b45baf 100644 --- a/static/legacy_web/index.html +++ b/static/legacy_web/index.html @@ -25,6 +25,7 @@
+
Basic options
@@ -39,11 +40,11 @@ - + - +
-
+ -
-
- - - + +
+
+
Image-to-image options
+ + +
- - +
+
Post-processing options
- +