Update test-invoke-pip.yml (#2524)

test-invoke-pip.yml:
- enable caching of pip dependencies in `actions/setup-python@v4`
- add workflow_dispatch trigger
- fix indentation in concurrency
- set env `PIP_USE_PEP517: '1'`
- cache python dependencies
- remove models cache (we are currently using 190.96 GB of the 10 GB cache quota at the time of writing)
- add step to set `INVOKEAI_OUTDIR`
- add outdir arg to invokeai
- fix path in archive results

model_manager.py:
- read files in chunks when calculating the sha256 hash (the Windows runner crashes otherwise)
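
A minimal sketch of the chunked-read hashing pattern described above (an illustrative standalone function, not the exact model_manager.py code):

    import hashlib
    from pathlib import Path

    def sha256_of_file(path: Path, chunksize: int = 4096) -> str:
        # Read in fixed-size chunks so memory use stays flat; reading
        # multi-GB model files with a single f.read() is what crashed
        # the Windows runner.
        sha = hashlib.sha256()
        with open(path, "rb") as f:
            while chunk := f.read(chunksize):  # walrus operator, Python 3.8+
                sha.update(chunk)
        return sha.hexdigest()

In the actual change, the same loop runs per file inside _diffuser_sha256(), which walks the model directory and feeds every file into a single hash object.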
Matthias Wild 2023-02-06 12:56:15 +01:00 committed by GitHub
commit a485d45400
2 changed files with 27 additions and 37 deletions

test-invoke-pip.yml

@@ -8,10 +8,11 @@ on:
- 'ready_for_review'
- 'opened'
- 'synchronize'
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
matrix:
@@ -62,28 +63,13 @@ jobs:
# github-env: $env:GITHUB_ENV
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
runs-on: ${{ matrix.os }}
env:
PIP_USE_PEP517: '1'
steps:
- name: Checkout sources
id: checkout-sources
uses: actions/checkout@v3
- name: setup python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Set Cache-Directory Windows
if: runner.os == 'Windows'
id: set-cache-dir-windows
run: |
echo "CACHE_DIR=$HOME\invokeai\models" >> ${{ matrix.github-env }}
echo "PIP_NO_CACHE_DIR=1" >> ${{ matrix.github-env }}
- name: Set Cache-Directory others
if: runner.os != 'Windows'
id: set-cache-dir-others
run: echo "CACHE_DIR=$HOME/invokeai/models" >> ${{ matrix.github-env }}
- name: set test prompt to main branch validation
if: ${{ github.ref == 'refs/heads/main' }}
run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
@@ -92,26 +78,29 @@ jobs:
if: ${{ github.ref != 'refs/heads/main' }}
run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
- name: setup python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
cache: pip
cache-dependency-path: pyproject.toml
- name: install invokeai
env:
PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}
run: >
pip3 install
--use-pep517
--editable=".[test]"
- name: run pytest
id: run-pytest
run: pytest
- name: Use Cached models
id: cache-sd-model
uses: actions/cache@v3
env:
cache-name: huggingface-models
with:
path: ${{ env.CACHE_DIR }}
key: ${{ env.cache-name }}
enableCrossOsArchive: true
- name: set INVOKEAI_OUTDIR
run: >
python -c
"import os;from ldm.invoke.globals import Globals;OUTDIR=os.path.join(Globals.root,str('outputs'));print(f'INVOKEAI_OUTDIR={OUTDIR}')"
>> ${{ matrix.github-env }}
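
For readability, the one-liner above is equivalent to the following sketch (the step keeps it as a single python -c call so its output can be appended to the runner's environment file):

    import os
    from ldm.invoke.globals import Globals  # Globals.root is the InvokeAI runtime root

    # Print a KEY=VALUE line; the shell redirect appends it to the file named
    # by ${{ matrix.github-env }} (GITHUB_ENV / $env:GITHUB_ENV), which exports
    # INVOKEAI_OUTDIR to all later steps in the job.
    outdir = os.path.join(Globals.root, "outputs")
    print(f"INVOKEAI_OUTDIR={outdir}")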
- name: run invokeai-configure
id: run-preload-models
@@ -124,9 +113,8 @@ jobs:
--full-precision
# can't use fp16 weights without a GPU
- name: Run the tests
if: runner.os != 'Windows'
id: run-tests
- name: run invokeai
id: run-invokeai
env:
# Set offline mode to make sure configure preloaded successfully.
HF_HUB_OFFLINE: 1
@@ -137,10 +125,11 @@ jobs:
--no-patchmatch
--no-nsfw_checker
--from_file ${{ env.TEST_PROMPTS }}
--outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}
- name: Archive results
id: archive-results
uses: actions/upload-artifact@v3
with:
name: results_${{ matrix.pytorch }}_${{ matrix.python-version }}
path: ${{ env.INVOKEAI_ROOT }}/outputs
name: results
path: ${{ env.INVOKEAI_OUTDIR }}

model_manager.py

@@ -753,7 +753,7 @@ class ModelManager(object):
return search_folder, found_models
def _choose_diffusers_vae(self, model_name:str, vae:str=None)->Union[dict,str]:
# In the event that the original entry is using a custom ckpt VAE, we try to
# map that VAE onto a diffuser VAE using a hard-coded dictionary.
# I would prefer to do this differently: We load the ckpt model into memory, swap the
@@ -954,7 +954,7 @@ class ModelManager(object):
def _has_cuda(self) -> bool:
return self.device.type == 'cuda'
def _diffuser_sha256(self,name_or_path:Union[str, Path])->Union[str,bytes]:
def _diffuser_sha256(self,name_or_path:Union[str, Path],chunksize=4096)->Union[str,bytes]:
path = None
if isinstance(name_or_path,Path):
path = name_or_path
@@ -976,7 +976,8 @@ class ModelManager(object):
for name in files:
count += 1
with open(os.path.join(root,name),'rb') as f:
sha.update(f.read())
while chunk := f.read(chunksize):
sha.update(chunk)
hash = sha.hexdigest()
toc = time.time()
print(f' | sha256 = {hash} ({count} files hashed in','%4.2fs)' % (toc - tic))