ci: use diffusers model

Kevin Turner 2022-11-24 18:38:08 -08:00
parent b7864aa1a7
commit 5cee2111be
2 changed files with 42 additions and 44 deletions

View File

@@ -14,7 +14,7 @@ jobs:
strategy:
matrix:
stable-diffusion-model:
- 'stable-diffusion-1.5'
- 'diffusers-1.5'
environment-yaml:
- environment-lin-amd.yml
- environment-lin-cuda.yml
@@ -29,13 +29,7 @@ jobs:
- environment-yaml: environment-mac.yml
os: macos-12
default-shell: bash -l {0}
# - stable-diffusion-model: https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt
# stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1/sd-v1-4.ckpt
# stable-diffusion-model-switch: stable-diffusion-1.4
- stable-diffusion-model: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
stable-diffusion-model-switch: diffusers-1.5
name: ${{ matrix.os }} with ${{ matrix.stable-diffusion-model-switch }}
name: ${{ matrix.environment-yaml }} on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
env:
CONDA_ENV_NAME: invokeai
@@ -62,7 +56,7 @@ jobs:
id: cache-sd-model
uses: actions/cache@v3
env:
cache-name: huggingface-${{ matrix.stable-diffusion-model-switch }}
cache-name: huggingface-${{ matrix.stable-diffusion-model }}
with:
path: ~/.cache/huggingface
key: ${{ env.cache-name }}
@@ -106,7 +100,7 @@ jobs:
echo -n '${{ secrets.HUGGINGFACE_TOKEN }}' > ~/.huggingface/token
fi
python scripts/preload_models.py \
--no-interactive \
--no-interactive --yes \
--full-precision # can't use fp16 weights without a GPU
- name: cat ~/.invokeai
@@ -116,6 +110,7 @@ jobs:
- name: Run the tests
id: run-tests
env:
# Set offline mode to make sure configure preloaded successfully.
HF_HUB_OFFLINE: 1
HF_DATASETS_OFFLINE: 1
TRANSFORMERS_OFFLINE: 1
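
Taken together, the hunks above replace the per-job ckpt download with one pattern: cache the shared Hugging Face directory, let preload_models.py populate it, and run the tests with the hub forced offline so that anything not preloaded fails the job instead of being silently re-fetched. A condensed sketch of that pattern, using only step names, paths, and flags that appear in this commit; the ordering of surrounding steps and the tail of the test command are assumptions:

- name: Use Cached Stable Diffusion Model
  id: cache-sd-model
  uses: actions/cache@v3
  env:
    cache-name: huggingface-${{ matrix.stable-diffusion-model }}
  with:
    path: ~/.cache/huggingface   # hub cache shared by diffusers/transformers
    key: ${{ env.cache-name }}
- name: run preload_models.py
  run: |
    python scripts/preload_models.py \
      --no-interactive --yes \
      --full-precision           # can't use fp16 weights without a GPU
- name: Run the tests
  env:
    # Offline mode: the run must find everything in the cache populated above.
    HF_HUB_OFFLINE: 1
    HF_DATASETS_OFFLINE: 1
    TRANSFORMERS_OFFLINE: 1
  run: python scripts/invoke.py --no-patchmatch   # remaining flags as in the workflow (assumed)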

View File

@@ -1,20 +1,20 @@
name: Test invoke.py pip
on:
push:
branches:
- 'main'
- 'development'
pull_request:
branches:
- 'main'
- 'development'
on: [push, pull_request]
jobs:
matrix:
# Run on:
# - pull requests
# - pushes to forks (will run in the forked project with that fork's secrets)
# - pushes to branches that are *not* pull requests
if: |
github.event_name == 'pull_request'
|| github.repository != 'invoke-ai/InvokeAI'
|| github.ref_protected
strategy:
matrix:
stable-diffusion-model:
- stable-diffusion-1.5
- diffusers-1.5
requirements-file:
- requirements-lin-cuda.txt
- requirements-lin-amd.txt
@@ -32,10 +32,6 @@ jobs:
- requirements-file: requirements-mac-mps-cpu.txt
os: macOS-12
default-shell: bash -l {0}
- stable-diffusion-model: stable-diffusion-1.5
stable-diffusion-model-url: https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
stable-diffusion-model-dl-path: models/ldm/stable-diffusion-v1
stable-diffusion-model-dl-name: v1-5-pruned-emaonly.ckpt
name: ${{ matrix.requirements-file }} on ${{ matrix.python-version }}
runs-on: ${{ matrix.os }}
defaults:
@@ -43,6 +39,8 @@ jobs:
shell: ${{ matrix.default-shell }}
env:
INVOKEAI_ROOT: '${{ github.workspace }}/invokeai'
PYTHONUNBUFFERED: 1
HAVE_SECRETS: ${{ secrets.HUGGINGFACE_TOKEN != '' }}
steps:
- name: Checkout sources
id: checkout-sources
@@ -53,6 +51,19 @@ jobs:
mkdir -p ${{ env.INVOKEAI_ROOT }}/configs
cp configs/models.yaml.example ${{ env.INVOKEAI_ROOT }}/configs/models.yaml
- name: Use Cached Stable Diffusion Model
id: cache-sd-model
uses: actions/cache@v3
env:
cache-name: huggingface-${{ matrix.stable-diffusion-model }}
with:
path: ~/.cache/huggingface
key: ${{ env.cache-name }}
- name: Check model availability
if: steps.cache-sd-model.outputs.cache-hit != true && env.HAVE_SECRETS != 'true'
run: echo -e '\a ⛔ GitHub model cache not found, and no HUGGINGFACE_TOKEN is available. Will not be able to load Stable Diffusion.' ; exit 1
- name: set test prompt to main branch validation
if: ${{ github.ref == 'refs/heads/main' }}
run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> $GITHUB_ENV
@@ -81,29 +92,16 @@ jobs:
- name: install requirements
run: ${{ env.pythonLocation }}/bin/pip install -r '${{ matrix.requirements-file }}'
- name: Use Cached Stable Diffusion Model
id: cache-sd-model
uses: actions/cache@v3
env:
cache-name: cache-${{ matrix.stable-diffusion-model }}
with:
path: ${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}
key: ${{ env.cache-name }}
- name: Download ${{ matrix.stable-diffusion-model }}
id: download-stable-diffusion-model
if: ${{ steps.cache-sd-model.outputs.cache-hit != 'true' }}
run: |
mkdir -p "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}"
curl \
-H "Authorization: Bearer ${{ secrets.HUGGINGFACE_TOKEN }}" \
-o "${{ env.INVOKEAI_ROOT }}/${{ matrix.stable-diffusion-model-dl-path }}/${{ matrix.stable-diffusion-model-dl-name }}" \
-L ${{ matrix.stable-diffusion-model-url }}
- name: run configure_invokeai.py
id: run-preload-models
run: |
${{ env.pythonLocation }}/bin/python scripts/configure_invokeai.py --no-interactive --yes
if [ "${HAVE_SECRETS}" == true ] ; then
mkdir -p ~/.huggingface
echo -n '${{ secrets.HUGGINGFACE_TOKEN }}' > ~/.huggingface/token
fi
${{ env.pythonLocation }}/bin/python scripts/configure_invokeai.py \
--no-interactive --yes \
--full-precision # can't use fp16 weights without a GPU
- name: cat ~/.invokeai
id: cat-invokeai
@@ -111,6 +109,11 @@ jobs:
- name: Run the tests
id: run-tests
env:
# Set offline mode to make sure configure preloaded successfully.
HF_HUB_OFFLINE: 1
HF_DATASETS_OFFLINE: 1
TRANSFORMERS_OFFLINE: 1
run: |
time ${{ env.pythonLocation }}/bin/python scripts/invoke.py \
--no-patchmatch \
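
For completeness, here is how the guard added earlier in this file fits together with the offline test run. The final invoke.py arguments are an assumption (the diff is truncated at this point); TEST_PROMPTS is the prompt file selected earlier in the job:

- name: Check model availability
  # Abort early: without a cached model or a HUGGINGFACE_TOKEN secret,
  # configure_invokeai.py cannot fetch the diffusers weights.
  if: steps.cache-sd-model.outputs.cache-hit != true && env.HAVE_SECRETS != 'true'
  run: echo '⛔ No cached model and no HUGGINGFACE_TOKEN available.' ; exit 1
- name: Run the tests
  env:
    # Offline mode to make sure configure_invokeai.py preloaded successfully.
    HF_HUB_OFFLINE: 1
    HF_DATASETS_OFFLINE: 1
    TRANSFORMERS_OFFLINE: 1
  run: |
    time ${{ env.pythonLocation }}/bin/python scripts/invoke.py \
      --no-patchmatch \
      --from_file ${{ env.TEST_PROMPTS }}   # assumed tail of the command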