diff --git a/.github/workflows/cache-model.yml b/.github/workflows/cache-model.yml
deleted file mode 100644
index 2682943eef..0000000000
--- a/.github/workflows/cache-model.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-name: Cache Model
-on:
-  workflow_dispatch
-jobs:
-  build:
-    strategy:
-      matrix:
-        os: [ macos-12 ]
-    name: Create Caches using ${{ matrix.os }}
-    runs-on: ${{ matrix.os }}
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-      - name: Cache model
-        id: cache-sd-v1-4
-        uses: actions/cache@v3
-        env:
-          cache-name: cache-sd-v1-4
-        with:
-          path: models/ldm/stable-diffusion-v1/model.ckpt
-          key: ${{ env.cache-name }}
-          restore-keys: |
-            ${{ env.cache-name }}
-      - name: Download Stable Diffusion v1.4 model
-        if: ${{ steps.cache-sd-v1-4.outputs.cache-hit != 'true' }}
-        continue-on-error: true
-        run: |
-          if [ ! -e models/ldm/stable-diffusion-v1 ]; then
-            mkdir -p models/ldm/stable-diffusion-v1
-          fi
-          if [ ! -e models/ldm/stable-diffusion-v1/model.ckpt ]; then
-            curl -o models/ldm/stable-diffusion-v1/model.ckpt ${{ secrets.SD_V1_4_URL }}
-          fi
-# Uncomment this when we no longer make changes to environment-mac.yaml
-#      - name: Cache environment
-#        id: cache-conda-env-ldm
-#        uses: actions/cache@v3
-#        env:
-#          cache-name: cache-conda-env-ldm
-#        with:
-#          path: ~/.conda/envs/ldm
-#          key: ${{ env.cache-name }}
-#          restore-keys: |
-#            ${{ env.cache-name }}
-      - name: Install dependencies
-#        if: ${{ steps.cache-conda-env-ldm.outputs.cache-hit != 'true' }}
-        run: |
-          conda env create -f environment-mac.yaml
-      - name: Cache hugginface and torch models
-        id: cache-hugginface-torch
-        uses: actions/cache@v3
-        env:
-          cache-name: cache-hugginface-torch
-        with:
-          path: ~/.cache
-          key: ${{ env.cache-name }}
-          restore-keys: |
-            ${{ env.cache-name }}
-      - name: Download Huggingface and Torch models
-        if: ${{ steps.cache-hugginface-torch.outputs.cache-hit != 'true' }}
-        continue-on-error: true
-        run: |
-          export PYTHON_BIN=/usr/local/miniconda/envs/ldm/bin/python
-          $PYTHON_BIN scripts/preload_models.py
\ No newline at end of file
diff --git a/.github/workflows/create-caches.yml b/.github/workflows/create-caches.yml
new file mode 100644
index 0000000000..951718af1b
--- /dev/null
+++ b/.github/workflows/create-caches.yml
@@ -0,0 +1,70 @@
+name: Create Caches
+on:
+  workflow_dispatch
+jobs:
+  build:
+    strategy:
+      matrix:
+        os: [ ubuntu-latest, macos-12 ]
+    name: Create Caches on ${{ matrix.os }} conda
+    runs-on: ${{ matrix.os }}
+    steps:
+      - name: Set platform variables
+        id: vars
+        run: |
+          if [ "$RUNNER_OS" = "macOS" ]; then
+            echo "::set-output name=ENV_FILE::environment-mac.yaml"
+            echo "::set-output name=PYTHON_BIN::/usr/local/miniconda/envs/ldm/bin/python"
+          elif [ "$RUNNER_OS" = "Linux" ]; then
+            echo "::set-output name=ENV_FILE::environment.yaml"
+            echo "::set-output name=PYTHON_BIN::/usr/share/miniconda/envs/ldm/bin/python"
+          fi
+      - name: Checkout sources
+        uses: actions/checkout@v3
+      - name: Use Cached Stable Diffusion v1.4 Model
+        id: cache-sd-v1-4
+        uses: actions/cache@v3
+        env:
+          cache-name: cache-sd-v1-4
+        with:
+          path: models/ldm/stable-diffusion-v1/model.ckpt
+          key: ${{ env.cache-name }}
+          restore-keys: |
+            ${{ env.cache-name }}
+      - name: Download Stable Diffusion v1.4 Model
+        if: ${{ steps.cache-sd-v1-4.outputs.cache-hit != 'true' }}
+        run: |
+          if [ ! -e models/ldm/stable-diffusion-v1 ]; then
+            mkdir -p models/ldm/stable-diffusion-v1
+          fi
+          if [ ! -e models/ldm/stable-diffusion-v1/model.ckpt ]; then
+            curl -o models/ldm/stable-diffusion-v1/model.ckpt ${{ secrets.SD_V1_4_URL }}
+          fi
+      - name: Use Cached Dependencies
+        id: cache-conda-env-ldm
+        uses: actions/cache@v3
+        env:
+          cache-name: cache-conda-env-ldm
+        with:
+          path: ~/.conda/envs/ldm
+          key: ${{ env.cache-name }}
+          restore-keys: |
+            ${{ env.cache-name }}-${{ runner.os }}-${{ hashFiles(steps.vars.outputs.ENV_FILE) }}
+      - name: Install Dependencies
+        if: ${{ steps.cache-conda-env-ldm.outputs.cache-hit != 'true' }}
+        run: |
+          conda env create -f ${{ steps.vars.outputs.ENV_FILE }}
+      - name: Use Cached Huggingface and Torch models
+        id: cache-huggingface-torch
+        uses: actions/cache@v3
+        env:
+          cache-name: cache-huggingface-torch
+        with:
+          path: ~/.cache
+          key: ${{ env.cache-name }}
+          restore-keys: |
+            ${{ env.cache-name }}-${{ hashFiles('scripts/preload_models.py') }}
+      - name: Download Huggingface and Torch models
+        if: ${{ steps.cache-huggingface-torch.outputs.cache-hit != 'true' }}
+        run: |
+          ${{ steps.vars.outputs.PYTHON_BIN }} scripts/preload_models.py
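Aside on the `~/.cache` cache above: `scripts/preload_models.py` exists to pull the Hugging Face and Torch weights into the user cache directory ahead of time, which is why the workflow caches that path. The sketch below only illustrates the kind of download involved, not the actual script; the model name is the CLIP checkpoint Stable Diffusion v1 is normally paired with and is stated here as an assumption, as is having `transformers` installed in the environment.

```python
# Illustrative sketch of what a preload step leaves under ~/.cache.
# The real scripts/preload_models.py does more than this; the point is that
# from_pretrained() stores its downloads under ~/.cache/huggingface, so a
# cached ~/.cache lets later CI runs skip the network entirely.
from transformers import CLIPTokenizer, CLIPTextModel

version = 'openai/clip-vit-large-patch14'  # assumed SD v1 text-encoder checkpoint
tokenizer = CLIPTokenizer.from_pretrained(version)
text_model = CLIPTextModel.from_pretrained(version)
print('cached', version)
```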
diff --git a/.github/workflows/macos12-miniconda.yml b/.github/workflows/macos12-miniconda.yml
deleted file mode 100644
index 18f21277c0..0000000000
--- a/.github/workflows/macos12-miniconda.yml
+++ /dev/null
@@ -1,80 +0,0 @@
-name: Build
-on:
-  push:
-    branches: [ main ]
-  pull_request:
-    branches: [ main ]
-jobs:
-  build:
-    strategy:
-      matrix:
-        os: [ macos-12 ]
-    name: Build on ${{ matrix.os }} miniconda
-    runs-on: ${{ matrix.os }}
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v3
-      - name: Cache model
-        id: cache-sd-v1-4
-        uses: actions/cache@v3
-        env:
-          cache-name: cache-sd-v1-4
-        with:
-          path: models/ldm/stable-diffusion-v1/model.ckpt
-          key: ${{ env.cache-name }}
-          restore-keys: |
-            ${{ env.cache-name }}
-      - name: Download Stable Diffusion v1.4 model
-        if: ${{ steps.cache-sd-v1-4.outputs.cache-hit != 'true' }}
-        continue-on-error: true
-        run: |
-          if [ ! -e models/ldm/stable-diffusion-v1 ]; then
-            mkdir -p models/ldm/stable-diffusion-v1
-          fi
-          if [ ! -e models/ldm/stable-diffusion-v1/model.ckpt ]; then
-            curl -o models/ldm/stable-diffusion-v1/model.ckpt ${{ secrets.SD_V1_4_URL }}
-          fi
-# Uncomment this when we no longer make changes to environment-mac.yaml
-#      - name: Cache environment
-#        id: cache-conda-env-ldm
-#        uses: actions/cache@v3
-#        env:
-#          cache-name: cache-conda-env-ldm
-#        with:
-#          path: ~/.conda/envs/ldm
-#          key: ${{ env.cache-name }}
-#          restore-keys: |
-#            ${{ env.cache-name }}
-      - name: Install dependencies
-#        if: ${{ steps.cache-conda-env-ldm.outputs.cache-hit != 'true' }}
-        run: |
-          conda env create -f environment-mac.yaml
-      - name: Cache hugginface and torch models
-        id: cache-hugginface-torch
-        uses: actions/cache@v3
-        env:
-          cache-name: cache-hugginface-torch
-        with:
-          path: ~/.cache
-          key: ${{ env.cache-name }}
-          restore-keys: |
-            ${{ env.cache-name }}
-      - name: Download Huggingface and Torch models
-        if: ${{ steps.cache-hugginface-torch.outputs.cache-hit != 'true' }}
-        continue-on-error: true
-        run: |
-          export PYTHON_BIN=/usr/local/miniconda/envs/ldm/bin/python
-          $PYTHON_BIN scripts/preload_models.py
-      - name: Run the tests
-        run: |
-          # Note, can't "activate" via automation, and activation is just env vars and path
-          export PYTHON_BIN=/usr/local/miniconda/envs/ldm/bin/python
-          export PYTORCH_ENABLE_MPS_FALLBACK=1
-          $PYTHON_BIN scripts/preload_models.py
-          mkdir -p outputs/img-samples
-          time $PYTHON_BIN scripts/dream.py --from_file tests/prompts.txt 2> outputs/img-samples/err.log > outputs/img-samples/out.log
-      - name: Archive results
-        uses: actions/upload-artifact@v3
-        with:
-          name: results
-          path: outputs/img-samples
\ No newline at end of file
diff --git a/.github/workflows/test-dream-conda.yml b/.github/workflows/test-dream-conda.yml
new file mode 100644
index 0000000000..3bd9b24582
--- /dev/null
+++ b/.github/workflows/test-dream-conda.yml
@@ -0,0 +1,97 @@
+name: Test Dream with Conda
+on:
+  push:
+    branches:
+      - 'main'
+      - 'development'
+jobs:
+  os_matrix:
+    strategy:
+      matrix:
+        os: [ ubuntu-latest, macos-12 ]
+    name: Test dream.py on ${{ matrix.os }} with conda
+    runs-on: ${{ matrix.os }}
+    steps:
+      - run: |
+          echo The PR was merged
+      - name: Set platform variables
+        id: vars
+        run: |
+          # Note, can't "activate" via github action; specifying the env's python has the same effect
+          if [ "$RUNNER_OS" = "macOS" ]; then
+            echo "::set-output name=ENV_FILE::environment-mac.yaml"
+            echo "::set-output name=PYTHON_BIN::/usr/local/miniconda/envs/ldm/bin/python"
+          elif [ "$RUNNER_OS" = "Linux" ]; then
+            echo "::set-output name=ENV_FILE::environment.yaml"
+            echo "::set-output name=PYTHON_BIN::/usr/share/miniconda/envs/ldm/bin/python"
+          fi
+      - name: Checkout sources
+        uses: actions/checkout@v3
+      - name: Use Cached Stable Diffusion v1.4 Model
+        id: cache-sd-v1-4
+        uses: actions/cache@v3
+        env:
+          cache-name: cache-sd-v1-4
+        with:
+          path: models/ldm/stable-diffusion-v1/model.ckpt
+          key: ${{ env.cache-name }}
+          restore-keys: |
+            ${{ env.cache-name }}
+      - name: Download Stable Diffusion v1.4 Model
+        if: ${{ steps.cache-sd-v1-4.outputs.cache-hit != 'true' }}
+        run: |
+          if [ ! -e models/ldm/stable-diffusion-v1 ]; then
+            mkdir -p models/ldm/stable-diffusion-v1
+          fi
+          if [ ! -e models/ldm/stable-diffusion-v1/model.ckpt ]; then
+            curl -o models/ldm/stable-diffusion-v1/model.ckpt ${{ secrets.SD_V1_4_URL }}
+          fi
+      - name: Use Cached Dependencies
+        id: cache-conda-env-ldm
+        uses: actions/cache@v3
+        env:
+          cache-name: cache-conda-env-ldm
+        with:
+          path: ~/.conda/envs/ldm
+          key: ${{ env.cache-name }}
+          restore-keys: |
+            ${{ env.cache-name }}-${{ runner.os }}-${{ hashFiles(steps.vars.outputs.ENV_FILE) }}
+      - name: Install Dependencies
+        if: ${{ steps.cache-conda-env-ldm.outputs.cache-hit != 'true' }}
+        run: |
+          conda env create -f ${{ steps.vars.outputs.ENV_FILE }}
+      - name: Use Cached Huggingface and Torch models
+        id: cache-huggingface-torch
+        uses: actions/cache@v3
+        env:
+          cache-name: cache-huggingface-torch
+        with:
+          path: ~/.cache
+          key: ${{ env.cache-name }}
+          restore-keys: |
+            ${{ env.cache-name }}-${{ hashFiles('scripts/preload_models.py') }}
+      - name: Download Huggingface and Torch models
+        if: ${{ steps.cache-huggingface-torch.outputs.cache-hit != 'true' }}
+        run: |
+          ${{ steps.vars.outputs.PYTHON_BIN }} scripts/preload_models.py
+#      - name: Run tmate
+#        uses: mxschmitt/action-tmate@v3
+#        timeout-minutes: 30
+      - name: Run the tests
+        run: |
+          # Note, can't "activate" via github action; specifying the env's python has the same effect
+          if [ $(uname) = "Darwin" ]; then
+            export PYTORCH_ENABLE_MPS_FALLBACK=1
+          fi
+          # Utterly hacky, but I don't know how else to do this
+          if [[ ${{ github.ref }} == 'refs/heads/main' ]]; then
+            time ${{ steps.vars.outputs.PYTHON_BIN }} scripts/dream.py --from_file tests/preflight_prompts.txt --full_precision
+          elif [[ ${{ github.ref }} == 'refs/heads/development' ]]; then
+            time ${{ steps.vars.outputs.PYTHON_BIN }} scripts/dream.py --from_file tests/dev_prompts.txt --full_precision
+          fi
+          mkdir -p outputs/img-samples
+      - name: Archive results
+        uses: actions/upload-artifact@v3
+        with:
+          name: results
+          path: outputs/img-samples
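The test workflow above drives `dream.py` through `--from_file` and the prompt files added at the end of this diff, where each line is a prompt followed by CLI switches such as `-Ak_lms -S42`. The snippet below is a hypothetical illustration of that line format, not dream.py's actual argument parser, and the helper name is made up for the example:

```python
# Illustration only: split a tests/*_prompts.txt line into prompt text and
# switches. This is NOT dream.py's real parser, just a sketch of the
# "prompt words followed by -X switches" format the test files use.
import shlex

def split_prompt_line(line: str):
    tokens = shlex.split(line)
    for i, tok in enumerate(tokens):
        if tok.startswith('-'):          # switches begin at the first '-' token
            return ' '.join(tokens[:i]), tokens[i:]
    return ' '.join(tokens), []

prompt, switches = split_prompt_line('banana sushi -Ak_lms -S42')
print(prompt)    # banana sushi
print(switches)  # ['-Ak_lms', '-S42']
```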
diff --git a/docs/installation/INSTALL_MAC.md b/docs/installation/INSTALL_MAC.md
index 39398c36ac..71535980f5 100644
--- a/docs/installation/INSTALL_MAC.md
+++ b/docs/installation/INSTALL_MAC.md
@@ -7,10 +7,7 @@ title: macOS
 - macOS 12.3 Monterey or later
 - Python
 - Patience
-- Apple Silicon\*
-
-\*I haven't tested any of this on Intel Macs but I have read that one person got
-it to work, so Apple Silicon might not be requried.
+- Apple Silicon or Intel Mac
 
 Things have moved really fast and so these instructions change often and are often out-of-date.
 One of the problems is that there are so many different ways
@@ -59,9 +56,13 @@ First get the weights checkpoint download started - it's big:
 # install python 3, git, cmake, protobuf:
 brew install cmake protobuf rust
 
-# install miniconda (M1 arm64 version):
- curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh -o Miniconda3-latest-MacOSX-arm64.sh
- /bin/bash Miniconda3-latest-MacOSX-arm64.sh
+# install miniconda for M1 arm64:
+curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh -o Miniconda3-latest-MacOSX-arm64.sh
+/bin/bash Miniconda3-latest-MacOSX-arm64.sh
+
+# OR install miniconda for Intel:
+curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -o Miniconda3-latest-MacOSX-x86_64.sh
+/bin/bash Miniconda3-latest-MacOSX-x86_64.sh
 
 # EITHER WAY,
 
@@ -82,15 +83,22 @@ brew install cmake protobuf rust
 ln -s "$PATH_TO_CKPT/sd-v1-4.ckpt" models/ldm/stable-diffusion-v1/model.ckpt
 
-# install packages
- PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac.yaml
- conda activate ldm
+# install packages for arm64
+PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac.yaml
+conda activate ldm
+
+# OR install packages for x86_64
+PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-64 conda env create -f environment-mac.yaml
+conda activate ldm
 
 # only need to do this once
 python scripts/preload_models.py
 
 # run SD!
 python scripts/dream.py --full_precision  # half-precision requires autocast and won't work
+
+# or run the web interface!
+python scripts/dream.py --web
 ```
 
 The original scripts should work as well.
 
@@ -181,7 +189,12 @@ There are several causes of these errors.
 - Third, if it says you're missing taming you need to rebuild your virtual
   environment.
 
-`conda env remove -n ldm conda env create -f environment-mac.yaml`
+```bash
+conda deactivate
+
+conda env remove -n ldm
+PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac.yaml
+```
 
 Fourth, If you have activated the ldm virtual environment and tried rebuilding
 it, maybe the problem could be that I have something installed that you don't
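Both the Mac instructions above and the CI workflows lean on PyTorch's MPS backend, with `PYTORCH_ENABLE_MPS_FALLBACK=1` covering operators MPS does not implement yet. A minimal local sanity check, assuming a PyTorch recent enough to ship MPS (1.12 or later) inside the activated `ldm` environment, might look like this:

```python
# Quick sanity check that PyTorch can see the Apple Metal (MPS) backend.
# Sketch only; assumes PyTorch >= 1.12 in the activated `ldm` env.
import torch

print('MPS built:    ', torch.backends.mps.is_built())
print('MPS available:', torch.backends.mps.is_available())

device = 'mps' if torch.backends.mps.is_available() else 'cpu'
x = torch.ones(2, 2, device=device)
print('tensor device:', x.device)
```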
diff --git a/ldm/dream/server.py b/ldm/dream/server.py
index cde3957a1f..372d719052 100644
--- a/ldm/dream/server.py
+++ b/ldm/dream/server.py
@@ -228,7 +228,8 @@ class DreamServer(BaseHTTPRequestHandler):
             nonlocal step_index
             if opt.progress_images and step % 5 == 0 and step < opt.steps - 1:
                 image = self.model.sample_to_image(sample)
-                name = f'{prefix}.{opt.seed}.{step_index}.png'
+                step_index_padded = str(step_index).rjust(len(str(opt.steps)), '0')
+                name = f'{prefix}.{opt.seed}.{step_index_padded}.png'
                 metadata = f'{opt.prompt} -S{opt.seed} [intermediate]'
                 path = step_writer.save_image_and_prompt_to_png(image, dream_prompt=metadata, name=name)
                 step_index += 1
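The `server.py` change above left-pads the intermediate-step index to the width of the total step count so that progress images sort in numeric order. A minimal demonstration of the same expression:

```python
# Demonstrates the zero-padding used for intermediate image filenames:
# pad the step index to the width of the total step count so that
# lexicographic order matches numeric order (e.g. 02 sorts before 10).
steps = 50
for step_index in (2, 10, 49):
    padded = str(step_index).rjust(len(str(steps)), '0')
    print(f'sample.{padded}.png')
# sample.02.png
# sample.10.png
# sample.49.png
```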
diff --git a/ldm/modules/embedding_manager.py b/ldm/modules/embedding_manager.py
index b579bcd885..09e6f495ab 100644
--- a/ldm/modules/embedding_manager.py
+++ b/ldm/modules/embedding_manager.py
@@ -82,7 +82,9 @@ class EmbeddingManager(nn.Module):
                 get_embedding_for_clip_token,
                 embedder.transformer.text_model.embeddings,
             )
-            token_dim = 1280
+            # per bug report #572
+            #token_dim = 1280
+            token_dim = 768
         else:  # using LDM's BERT encoder
             self.is_clip = False
             get_token_for_string = partial(
diff --git a/tests/dev_prompts.txt b/tests/dev_prompts.txt
new file mode 100644
index 0000000000..9ebca4e9f7
--- /dev/null
+++ b/tests/dev_prompts.txt
@@ -0,0 +1 @@
+banana sushi -Ak_lms -S42
diff --git a/tests/preflight_prompts.txt b/tests/preflight_prompts.txt
new file mode 100644
index 0000000000..5c5b8233a1
--- /dev/null
+++ b/tests/preflight_prompts.txt
@@ -0,0 +1,9 @@
+banana sushi -Ak_lms -S42
+banana sushi -Addim -S42
+banana sushi -Ak_lms -W640 -H480 -S42
+banana sushi -Ak_lms -S42 -G1 -U 2 0.5
+banana sushi -Ak_lms -S42 -v0.2 -n3
+banana sushi -Ak_lms -S42 -V1349749425:0.1,4145759947:0.1
+snake -I outputs/preflight/000006.4145759947.png -S42
+snake -I outputs/preflight/000006.4145759947.png -S42 -W640 -H640 --fit
+strawberry sushi -I./image-and-mask.png -S42 -f0.9 -s100 -C15
\ No newline at end of file
diff --git a/tests/prompts.txt b/tests/prompts.txt
deleted file mode 100644
index 955220a5e6..0000000000
--- a/tests/prompts.txt
+++ /dev/null
@@ -1 +0,0 @@
-test trending on artstation -s 1 -S 1
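On the `embedding_manager.py` change above: Stable Diffusion v1 conditions on the text encoder of OpenAI's CLIP ViT-L/14, whose hidden size is 768, so `token_dim` has to be 768 rather than 1280 for the embedding shapes to line up (the mismatch reported in #572). A sketch to confirm the dimension, assuming `transformers` is installed and the checkpoint can be downloaded:

```python
# Confirm the CLIP text-encoder width that token_dim must match for SD v1.
# Sketch only: downloads the openai/clip-vit-large-patch14 weights on first run.
from transformers import CLIPTextModel

text_encoder = CLIPTextModel.from_pretrained('openai/clip-vit-large-patch14')
print(text_encoder.config.hidden_size)  # 768, hence token_dim = 768
```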