From 5afb63e41b3e0b2cb0ac2c67d7ba4fbb532fe762 Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 17 Jan 2023 03:00:02 +0100 Subject: [PATCH 1/9] replace legacy setup.py with pyproject.toml other changes which were required: - move configure_invokeai.py into ldm.invoke - update files which imported configure_invokeai to use new location: - ldm/invoke/CLI.py - scripts/load_models.py - scripts/preload_models.py - update test-invoke-pip.yml: - remove pr type "converted_to_draft" - remove reference to dev/diffusers - remove no more needed requirements from matrix - add pytorch to matrix - install via `pip3 install --use-pep517 .` - use the created executables - this should also fix configure_invoke not executed in windows To install use `pip install --use-pep517 -e .` where `-e` is optional --- .github/workflows/test-invoke-pip.yml | 60 ++++---- ldm/invoke/CLI.py | 18 +-- {scripts => ldm/invoke}/configure_invokeai.py | 0 pyproject.toml | 130 ++++++++++++++++++ scripts/load_models.py | 2 +- scripts/preload_models.py | 2 +- setup.py | 99 ------------- 7 files changed, 169 insertions(+), 142 deletions(-) rename {scripts => ldm/invoke}/configure_invokeai.py (100%) create mode 100644 pyproject.toml delete mode 100644 setup.py diff --git a/.github/workflows/test-invoke-pip.yml b/.github/workflows/test-invoke-pip.yml index 4985202782..1b197283d4 100644 --- a/.github/workflows/test-invoke-pip.yml +++ b/.github/workflows/test-invoke-pip.yml @@ -8,47 +8,49 @@ on: - 'ready_for_review' - 'opened' - 'synchronize' - - 'converted_to_draft' concurrency: group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} cancel-in-progress: true jobs: - # fail_if_pull_request_is_draft: - # if: github.event.pull_request.draft == true && github.head_ref != 'dev/diffusers' - # runs-on: ubuntu-18.04 - # steps: - # - name: Fails in order to indicate that pull request needs to be marked as ready to review and unit tests workflow needs to pass. 
- # run: exit 1 matrix: - if: github.event.pull_request.draft == false || github.head_ref == 'dev/diffusers' + if: github.event.pull_request.draft == false strategy: matrix: stable-diffusion-model: - stable-diffusion-1.5 - requirements-file: - - requirements-lin-cuda.txt - - requirements-lin-amd.txt - - requirements-mac-mps-cpu.txt - - requirements-win-colab-cuda.txt python-version: # - '3.9' - '3.10' + pytorch: + - linux-cuda-11_7 + - linux-rocm-5_2 + # - linux-cpu + - macos-default + # - windows-cpu + - windows-cuda-11_7 include: - - requirements-file: requirements-lin-cuda.txt + - pytorch: linux-cuda-11_7 os: ubuntu-22.04 github-env: $GITHUB_ENV - - requirements-file: requirements-lin-amd.txt + - pytorch: linux-rocm-5_2 os: ubuntu-22.04 + extra-index-url: 'https://download.pytorch.org/whl/rocm5.2' github-env: $GITHUB_ENV - - requirements-file: requirements-mac-mps-cpu.txt + # - pytorch: linux-cpu + # os: ubuntu-22.04 + # extra-index-url: 'https://download.pytorch.org/whl/cpu' + - pytorch: macos os: macOS-12 github-env: $GITHUB_ENV - - requirements-file: requirements-win-colab-cuda.txt + # - pytorch: windows-cpu + # os: windows-2022 + - pytorch: windows-cuda-11_7 os: windows-2022 + extra-index-url: 'https://download.pytorch.org/whl/cu117' github-env: $env:GITHUB_ENV - name: ${{ matrix.requirements-file }} on ${{ matrix.python-version }} + name: ${{ matrix.pytorch }} on ${{ matrix.python-version }} runs-on: ${{ matrix.os }} env: INVOKE_MODEL_RECONFIGURE: '--yes' @@ -96,28 +98,22 @@ jobs: if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }} run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }} - - name: create requirements.txt - run: cp 'environments-and-requirements/${{ matrix.requirements-file }}' '${{ matrix.requirements-file }}' - - name: setup python uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - # cache: 'pip' - # cache-dependency-path: ${{ matrix.requirements-file 
}} - - name: install dependencies - run: pip3 install --upgrade pip setuptools wheel + - name: install invokeai + run: pip3 install --use-pep517 . + env: + PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }} - - name: install requirements - run: pip3 install -r '${{ matrix.requirements-file }}' - - - name: run configure_invokeai.py + - name: run configure_invokeai id: run-preload-models env: HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGINGFACE_TOKEN }} run: > - configure_invokeai.py + configure_invokeai --yes --default_only --full-precision # can't use fp16 weights without a GPU @@ -131,7 +127,7 @@ jobs: HF_DATASETS_OFFLINE: 1 TRANSFORMERS_OFFLINE: 1 run: > - python3 scripts/invoke.py + invoke --no-patchmatch --no-nsfw_checker --model ${{ matrix.stable-diffusion-model }} @@ -144,5 +140,5 @@ jobs: if: matrix.os != 'windows-2022' uses: actions/upload-artifact@v3 with: - name: results_${{ matrix.requirements-file }}_${{ matrix.python-version }} + name: results_${{ matrix.pytorch }}_${{ matrix.python-version }} path: ${{ env.INVOKEAI_ROOT }}/outputs diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py index ef6389c7cc..3bd8866d97 100644 --- a/ldm/invoke/CLI.py +++ b/ldm/invoke/CLI.py @@ -485,7 +485,7 @@ def do_command(command:str, gen, opt:Args, completer) -> tuple: optimize_model(path[1], gen, opt, completer) completer.add_history(command) operation = None - + elif command.startswith('!optimize'): path = shlex.split(command) @@ -570,11 +570,11 @@ def import_model(model_path:str, gen, opt, completer): (3) a huggingface repository id ''' model_name = None - + if model_path.startswith(('http:','https:','ftp:')): model_name = import_ckpt_model(model_path, gen, opt, completer) elif os.path.exists(model_path) and model_path.endswith('.ckpt') and os.path.isfile(model_path): - model_name = import_ckpt_model(model_path, gen, opt, completer) + model_name = import_ckpt_model(model_path, gen, opt, completer) elif re.match('^[\w.+-]+/[\w.+-]+$',model_path): model_name = 
import_diffuser_model(model_path, gen, opt, completer) elif os.path.isdir(model_path): @@ -584,12 +584,12 @@ def import_model(model_path:str, gen, opt, completer): if not model_name: return - + if not _verify_load(model_name, gen): print('** model failed to load. Discarding configuration entry') gen.model_manager.del_model(model_name) return - + if input('Make this the default model? [n] ') in ('y','Y'): gen.model_manager.set_default_model(model_name) @@ -690,7 +690,7 @@ def optimize_model(model_name_or_path:str, gen, opt, completer): else: print(f'** {model_name_or_path} is neither an existing model nor the path to a .ckpt file') return - + if not ckpt_path.is_absolute(): ckpt_path = Path(Globals.root,ckpt_path) @@ -698,7 +698,7 @@ def optimize_model(model_name_or_path:str, gen, opt, completer): if diffuser_path.exists(): print(f'** {model_name_or_path} is already optimized. Will not overwrite. If this is an error, please remove the directory {diffuser_path} and try again.') return - + new_config = gen.model_manager.convert_and_import( ckpt_path, diffuser_path, @@ -747,7 +747,7 @@ def edit_model(model_name:str, gen, opt, completer): continue completer.set_line(info[attribute]) info[attribute] = input(f'{attribute}: ') or info[attribute] - + if new_name != model_name: manager.del_model(model_name) @@ -1099,7 +1099,7 @@ def report_model_error(opt:Namespace, e:Exception): if yes_to_all is not None: sys.argv.append(yes_to_all) - import configure_invokeai + import ldm.invoke.configure_invokeai as configure_invokeai configure_invokeai.main() print('** InvokeAI will now restart') sys.argv = previous_args diff --git a/scripts/configure_invokeai.py b/ldm/invoke/configure_invokeai.py similarity index 100% rename from scripts/configure_invokeai.py rename to ldm/invoke/configure_invokeai.py diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..5ae6a9b5ea --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,130 @@ +[build-system] +requires = 
["setuptools~=65.5.0", "pip~=22.3.1", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "InvokeAI" +description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process" +requires-python = ">=3.9, <3.11" +readme = { content-type = "text/markdown", file = "README.md" } +keywords = ["stable-diffusion", "AI"] +dynamic = ["version"] +license = { file = "LICENSE" } +authors = [{ name = "The InvokeAI Project", email = "lincoln.stein@gmail.com" }] +classifiers = [ + 'Development Status :: 4 - Beta', + 'Environment :: GPU', + 'Environment :: GPU :: NVIDIA CUDA', + 'Environment :: MacOS X', + 'Intended Audience :: End Users/Desktop', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Operating System :: POSIX :: Linux', + 'Operating System :: MacOS', + 'Operating System :: Microsoft :: Windows', + 'Programming Language :: Python :: 3 :: Only', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Topic :: Artistic Software', + 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application', + 'Topic :: Internet :: WWW/HTTP :: WSGI :: Server', + 'Topic :: Multimedia :: Graphics', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + 'Topic :: Scientific/Engineering :: Image Processing', +] +dependencies = [ + "accelerate", + "albumentations", + "clip_anytorch", # replaceing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", + "clipseg @ https://github.com/invoke-ai/clipseg/archive/relaxed-python-requirement.zip", # is this still necesarry with diffusers? 
+ "datasets", + "diffusers[torch]~=0.11", + "dnspython==2.2.1", + "einops", + "eventlet", + "facexlib", + "flask==2.1.3", + "flask_cors==3.0.10", + "flask_socketio==5.3.0", + "flaskwebgui==1.0.3", + "getpass_asterisk", + "gfpgan==1.3.8", + "huggingface-hub>=0.11.1", + "imageio", + "imageio-ffmpeg", + "k-diffusion", # replaceing "k-diffusion @ https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip", + "kornia", + "npyscreen", + "numpy<1.24,>=1.23", + "omegaconf", + "opencv-python", + "picklescan", + "pillow", + "pudb", + "pypatchmatch", + "pyreadline3", + "pytorch-lightning==1.7.7", + "realesrgan", + "requests==2.25.1", + "safetensors", + "scikit-image>=0.19", + "send2trash", + "streamlit", + "taming-transformers-rom1504", + "test-tube>=0.7.5", + "torch>=1.13.1", + "torch-fidelity", + "torchvision>=0.14.1", + "torchmetrics", + "transformers~=4.25", + "windows-curses; sys_platform=='win32'", +] + +[project.optional-dependencies] +"dist" = ["pip-tools", "pipdeptree", "twine"] +"docs" = [ + "mkdocs-material<9.0", + "mkdocs-git-revision-date-localized-plugin", + "mkdocs-redirects==1.2.0", +] +test = ["pytest>6.0.0", "pytest-cov"] + +[project.scripts] +"configure_invokeai" = "ldm.invoke.configure_invokeai:main" +"dream" = "ldm.invoke:CLI.main" +"invoke" = "ldm.invoke:CLI.main" +"legacy_api" = "scripts:legacy_api.main" +"load_models" = "scripts:configure_invokeai.main" +"merge_embeddings" = "scripts:merge_embeddings.main" +"preload_models" = "ldm.invoke.configure_invokeai:main" + +[project.urls] +"Homepage" = "https://invoke-ai.github.io/InvokeAI/" +"Documentation" = "https://invoke-ai.github.io/InvokeAI/" +"Source" = "https://github.com/invoke-ai/InvokeAI/" +"Bug Reports" = "https://github.com/invoke-ai/InvokeAI/issues" +"Discord" = "https://discord.gg/ZmtBAhwWhy" + +[tool.setuptools.dynamic] +version = { attr = "ldm.invoke.__version__" } + +[tool.setuptools.packages.find] +"where" = ["."] +"include" = ["assets", "backend*", "configs*", "frontend.dist*", 
"ldm*"] + +[tool.setuptools.package-data] +"assets" = ["caution.png"] +"backend" = ["**.png"] +"configs" = ["*.example", "**/*.yaml", "*.txt"] +"frontend.dist" = ["**"] + +[tool.setuptools.exclude-package-data] +configs = ["models.yaml"] + +[tool.pytest.ini_options] +minversion = "6.0" +addopts = "-p pytest_cov --junitxml=junit/test-results.xml --cov=./ldm/ --cov=./backend --cov-branch" +python_files = ["test_*.py"] +pythonpath = [".venv/lib/python3.9", ".venv/lib/python3.10"] +testpaths = ["tests"] diff --git a/scripts/load_models.py b/scripts/load_models.py index e64d0b821d..89ae5d7588 100644 --- a/scripts/load_models.py +++ b/scripts/load_models.py @@ -5,7 +5,7 @@ # two machines must share a common .cache directory. import warnings -import configure_invokeai +import ldm.invoke.configure_invokeai as configure_invokeai if __name__ == '__main__': configure_invokeai.main() diff --git a/scripts/preload_models.py b/scripts/preload_models.py index e64d0b821d..89ae5d7588 100755 --- a/scripts/preload_models.py +++ b/scripts/preload_models.py @@ -5,7 +5,7 @@ # two machines must share a common .cache directory. 
import warnings -import configure_invokeai +import ldm.invoke.configure_invokeai as configure_invokeai if __name__ == '__main__': configure_invokeai.main() diff --git a/setup.py b/setup.py deleted file mode 100644 index afec71b967..0000000000 --- a/setup.py +++ /dev/null @@ -1,99 +0,0 @@ -import sys -import os -import re -from setuptools import setup, find_packages - -def list_files(directory): - listing = list() - for root, dirs, files in os.walk(directory,topdown=False): - pair = (root,[os.path.join(root,f) for f in files]) - listing.append(pair) - return listing - - -def get_version()->str: - from ldm.invoke import __version__ as version - return version - -# The canonical version number is stored in the file ldm/invoke/_version.py -VERSION = get_version() -DESCRIPTION = ('An implementation of Stable Diffusion which provides various new features' - ' and options to aid the image generation process') -LONG_DESCRIPTION = ('This version of Stable Diffusion features a slick WebGUI, an' - ' interactive command-line script that combines text2img and img2img' - ' functionality in a "dream bot" style interface, and multiple features' - ' and other enhancements.') -HOMEPAGE = 'https://github.com/invoke-ai/InvokeAI' -FRONTEND_FILES = list_files('frontend/dist') -FRONTEND_FILES.append(('assets',['assets/caution.png'])) -print(FRONTEND_FILES) - -REQUIREMENTS=[ - 'accelerate', - 'albumentations', - 'diffusers', - 'eventlet', - 'flask_cors', - 'flask_socketio', - 'flaskwebgui', - 'getpass_asterisk', - 'imageio-ffmpeg', - 'pyreadline3', - 'realesrgan', - 'send2trash', - 'streamlit', - 'taming-transformers-rom1504', - 'test-tube', - 'torch-fidelity', - 'torch', - 'torchvision', - 'transformers', - 'picklescan', - 'clip', - 'clipseg', - 'gfpgan', - 'k-diffusion', - 'pypatchmatch', -] - -setup( - name='InvokeAI', - version=VERSION, - description=DESCRIPTION, - long_description=LONG_DESCRIPTION, - author='The InvokeAI Project', - author_email='lincoln.stein@gmail.com', - 
url=HOMEPAGE, - license='MIT', - packages=find_packages(exclude=['tests.*']), - install_requires=REQUIREMENTS, - dependency_links=['https://download.pytorch.org/whl/torch_stable.html'], - python_requires='>=3.9, <4', - classifiers=[ - 'Development Status :: 4 - Beta', - 'Environment :: GPU', - 'Environment :: GPU :: NVIDIA CUDA', - 'Environment :: MacOS X', - 'Intended Audience :: End Users/Desktop', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: MIT License', - 'Operating System :: POSIX :: Linux', - 'Operating System :: MacOS', - 'Operating System :: Microsoft :: Windows', - 'Programming Language :: Python :: 3 :: Only,' - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10', - 'Topic :: Artistic Software', - 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application', - 'Topic :: Internet :: WWW/HTTP :: WSGI :: Server', - 'Topic :: Multimedia :: Graphics', - 'Topic :: Scientific/Engineering :: Artificial Intelligence', - 'Topic :: Scientific/Engineering :: Image Processing', - ], - scripts = ['scripts/invoke.py','scripts/configure_invokeai.py', 'scripts/sd-metadata.py', - 'scripts/preload_models.py', 'scripts/images2prompt.py','scripts/merge_embeddings.py', - 'scripts/textual_inversion_fe.py','scripts/textual_inversion.py' - ], - data_files=FRONTEND_FILES, -) From 5a788f8f736e223f42bc4045b490539deafd2f68 Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 17 Jan 2023 03:10:34 +0100 Subject: [PATCH 2/9] fix test-invoke-pip.yml matrix --- .github/workflows/test-invoke-pip.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-invoke-pip.yml b/.github/workflows/test-invoke-pip.yml index 1b197283d4..451effeba0 100644 --- a/.github/workflows/test-invoke-pip.yml +++ b/.github/workflows/test-invoke-pip.yml @@ -41,7 +41,7 @@ jobs: # - pytorch: linux-cpu # os: ubuntu-22.04 # extra-index-url: 'https://download.pytorch.org/whl/cpu' - - pytorch: macos 
+ - pytorch: macos-default os: macOS-12 github-env: $GITHUB_ENV # - pytorch: windows-cpu From 963b666844cb27548a14dbcc97481dfd50758a9c Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 17 Jan 2023 06:14:54 +0100 Subject: [PATCH 3/9] fix memory issue on windows runner - use cpu version which is only 162.6 MB - set `INVOKEAI_ROOT=C:\InvokeAI` on Windows runners --- .github/workflows/test-invoke-pip.yml | 36 ++++++++++++--------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/.github/workflows/test-invoke-pip.yml b/.github/workflows/test-invoke-pip.yml index 451effeba0..0feecb6eb1 100644 --- a/.github/workflows/test-invoke-pip.yml +++ b/.github/workflows/test-invoke-pip.yml @@ -3,6 +3,7 @@ on: push: branches: - 'main' + - 'debug/dev/pyproject-toml' pull_request: types: - 'ready_for_review' @@ -28,8 +29,8 @@ jobs: - linux-rocm-5_2 # - linux-cpu - macos-default - # - windows-cpu - - windows-cuda-11_7 + - windows-cpu + # - windows-cuda-11_7 include: - pytorch: linux-cuda-11_7 os: ubuntu-22.04 @@ -41,38 +42,34 @@ jobs: # - pytorch: linux-cpu # os: ubuntu-22.04 # extra-index-url: 'https://download.pytorch.org/whl/cpu' + # github-env: $GITHUB_ENV - pytorch: macos-default os: macOS-12 github-env: $GITHUB_ENV - # - pytorch: windows-cpu - # os: windows-2022 - - pytorch: windows-cuda-11_7 + - pytorch: windows-cpu os: windows-2022 - extra-index-url: 'https://download.pytorch.org/whl/cu117' github-env: $env:GITHUB_ENV + # - pytorch: windows-cuda-11_7 + # os: windows-2022 + # extra-index-url: 'https://download.pytorch.org/whl/cu117' + # github-env: $env:GITHUB_ENV name: ${{ matrix.pytorch }} on ${{ matrix.python-version }} runs-on: ${{ matrix.os }} env: + INVOKEAI_ROOT: ${{ github.workspace }} INVOKE_MODEL_RECONFIGURE: '--yes' - INVOKEAI_ROOT: '${{ github.workspace }}/invokeai' PYTHONUNBUFFERED: 1 - HAVE_SECRETS: ${{ secrets.HUGGINGFACE_TOKEN != '' }} steps: - name: Checkout sources id: checkout-sources uses: actions/checkout@v3 - name: set INVOKEAI_ROOT 
Windows - if: matrix.os == 'windows-2022' + if: runner.os == 'Windows' run: | - echo "INVOKEAI_ROOT=${{ github.workspace }}\invokeai" >> ${{ matrix.github-env }} - echo "INVOKEAI_OUTDIR=${{ github.workspace }}\invokeai\outputs" >> ${{ matrix.github-env }} - - - name: set INVOKEAI_ROOT others - if: matrix.os != 'windows-2022' - run: | - echo "INVOKEAI_ROOT=${{ github.workspace }}/invokeai" >> ${{ matrix.github-env }} - echo "INVOKEAI_OUTDIR=${{ github.workspace }}/invokeai/outputs" >> ${{ matrix.github-env }} + Get-PSDrive + mkdir C:\InvokeAI + echo "INVOKEAI_ROOT=C:\InvokeAI" >> ${{ matrix.github-env }} - name: Use Cached diffusers-1.5 id: cache-sd-model @@ -120,7 +117,7 @@ jobs: - name: Run the tests id: run-tests - if: matrix.os != 'windows-2022' + if: runner.os != 'Windows' env: # Set offline mode to make sure configure preloaded successfully. HF_HUB_OFFLINE: 1 @@ -133,11 +130,10 @@ jobs: --model ${{ matrix.stable-diffusion-model }} --from_file ${{ env.TEST_PROMPTS }} --root="${{ env.INVOKEAI_ROOT }}" - --outdir="${{ env.INVOKEAI_OUTDIR }}" - name: Archive results id: archive-results - if: matrix.os != 'windows-2022' + if: runner.os != 'Windows' uses: actions/upload-artifact@v3 with: name: results_${{ matrix.pytorch }}_${{ matrix.python-version }} From ef4b03289a5deca7d0a66627fb248b5f312d8aea Mon Sep 17 00:00:00 2001 From: mauwii Date: Tue, 17 Jan 2023 07:05:33 +0100 Subject: [PATCH 4/9] enable image generating step for windows as well - also remove left over debug lines and development branch leftover --- .github/workflows/test-invoke-pip.yml | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/.github/workflows/test-invoke-pip.yml b/.github/workflows/test-invoke-pip.yml index 0feecb6eb1..169bb96928 100644 --- a/.github/workflows/test-invoke-pip.yml +++ b/.github/workflows/test-invoke-pip.yml @@ -3,7 +3,6 @@ on: push: branches: - 'main' - - 'debug/dev/pyproject-toml' pull_request: types: - 'ready_for_review' @@ -67,7 +66,6 @@ jobs: - 
name: set INVOKEAI_ROOT Windows if: runner.os == 'Windows' run: | - Get-PSDrive mkdir C:\InvokeAI echo "INVOKEAI_ROOT=C:\InvokeAI" >> ${{ matrix.github-env }} @@ -87,12 +85,8 @@ jobs: if: ${{ github.ref == 'refs/heads/main' }} run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }} - - name: set test prompt to development branch validation - if: ${{ github.ref == 'refs/heads/development' }} - run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> ${{ matrix.github-env }} - - name: set test prompt to Pull Request validation - if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }} + if: ${{ github.ref != 'refs/heads/main' }} run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }} - name: setup python @@ -117,7 +111,6 @@ jobs: - name: Run the tests id: run-tests - if: runner.os != 'Windows' env: # Set offline mode to make sure configure preloaded successfully. HF_HUB_OFFLINE: 1 @@ -133,7 +126,6 @@ jobs: - name: Archive results id: archive-results - if: runner.os != 'Windows' uses: actions/upload-artifact@v3 with: name: results_${{ matrix.pytorch }}_${{ matrix.python-version }} From 6a3a0af6761c00400e20ebb0a9942c7b4c8d6bdf Mon Sep 17 00:00:00 2001 From: mauwii Date: Thu, 19 Jan 2023 00:50:53 +0100 Subject: [PATCH 5/9] update test-invoke-pip.yml - remove stable-diffusion-model from matrix - add windows-cuda-11_6 and linux-cuda-11_6 - enable linux-cpu - disable windows-cpu - change step order - remove job env - set runner.os specific env - install editable - cache models folder - remove `--model` and `--root` arguments from invoke command --- .github/workflows/test-invoke-pip.yml | 93 ++++++++++++++------------- 1 file changed, 50 insertions(+), 43 deletions(-) diff --git a/.github/workflows/test-invoke-pip.yml b/.github/workflows/test-invoke-pip.yml index 169bb96928..a1a217fd84 100644 --- a/.github/workflows/test-invoke-pip.yml +++ b/.github/workflows/test-invoke-pip.yml @@ -18,19 +18,23 @@ 
jobs: if: github.event.pull_request.draft == false strategy: matrix: - stable-diffusion-model: - - stable-diffusion-1.5 python-version: # - '3.9' - '3.10' pytorch: + - linux-cuda-11_6 - linux-cuda-11_7 - linux-rocm-5_2 - # - linux-cpu + - linux-cpu - macos-default - - windows-cpu + # - windows-cpu + - windows-cuda-11_6 # - windows-cuda-11_7 include: + - pytorch: linux-cuda-11_6 + os: ubuntu-22.04 + extra-index-url: 'https://download.pytorch.org/whl/cu116' + github-env: $GITHUB_ENV - pytorch: linux-cuda-11_7 os: ubuntu-22.04 github-env: $GITHUB_ENV @@ -38,15 +42,19 @@ jobs: os: ubuntu-22.04 extra-index-url: 'https://download.pytorch.org/whl/rocm5.2' github-env: $GITHUB_ENV - # - pytorch: linux-cpu - # os: ubuntu-22.04 - # extra-index-url: 'https://download.pytorch.org/whl/cpu' - # github-env: $GITHUB_ENV + - pytorch: linux-cpu + os: ubuntu-22.04 + extra-index-url: 'https://download.pytorch.org/whl/cpu' + github-env: $GITHUB_ENV - pytorch: macos-default os: macOS-12 github-env: $GITHUB_ENV - - pytorch: windows-cpu + # - pytorch: windows-cpu + # os: windows-2022 + # github-env: $env:GITHUB_ENV + - pytorch: windows-cuda-11_6 os: windows-2022 + extra-index-url: 'https://download.pytorch.org/whl/cu116' github-env: $env:GITHUB_ENV # - pytorch: windows-cuda-11_7 # os: windows-2022 @@ -54,32 +62,27 @@ jobs: # github-env: $env:GITHUB_ENV name: ${{ matrix.pytorch }} on ${{ matrix.python-version }} runs-on: ${{ matrix.os }} - env: - INVOKEAI_ROOT: ${{ github.workspace }} - INVOKE_MODEL_RECONFIGURE: '--yes' - PYTHONUNBUFFERED: 1 steps: - name: Checkout sources id: checkout-sources uses: actions/checkout@v3 - - name: set INVOKEAI_ROOT Windows - if: runner.os == 'Windows' - run: | - mkdir C:\InvokeAI - echo "INVOKEAI_ROOT=C:\InvokeAI" >> ${{ matrix.github-env }} - - - name: Use Cached diffusers-1.5 - id: cache-sd-model - uses: actions/cache@v3 - env: - cache-name: huggingface-${{ matrix.stable-diffusion-model }} + - name: setup python + uses: actions/setup-python@v4 with: - path: 
| - ${{ env.INVOKEAI_ROOT }}/models/runwayml - ${{ env.INVOKEAI_ROOT }}/models/stabilityai - ${{ env.INVOKEAI_ROOT }}/models/CompVis - key: ${{ env.cache-name }} + python-version: ${{ matrix.python-version }} + + - name: Set Cache-Directory Windows + if: runner.os == 'Windows' + id: set-cache-dir-windows + run: | + echo "CACHE_DIR=$HOME\invokeai\models" >> ${{ matrix.github-env }} + echo "PIP_NO_CACHE_DIR=1" >> ${{ matrix.github-env }} + + - name: Set Cache-Directory others + if: runner.os != 'Windows' + id: set-cache-dir-others + run: echo "CACHE_DIR=$HOME/invokeai/models" >> ${{ matrix.github-env }} - name: set test prompt to main branch validation if: ${{ github.ref == 'refs/heads/main' }} @@ -89,16 +92,21 @@ jobs: if: ${{ github.ref != 'refs/heads/main' }} run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }} - - name: setup python - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - name: install invokeai - run: pip3 install --use-pep517 . + run: pip3 install --use-pep517 -e . env: PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }} + - name: Use Cached models + id: cache-sd-model + uses: actions/cache@v3 + env: + cache-name: huggingface-models + with: + path: ${{ env.CACHE_DIR }} + key: ${{ env.cache-name }} + enableCrossOsArchive: true + - name: run configure_invokeai id: run-preload-models env: @@ -107,22 +115,21 @@ jobs: configure_invokeai --yes --default_only - --full-precision # can't use fp16 weights without a GPU + --full-precision + # can't use fp16 weights without a GPU - name: Run the tests id: run-tests - env: - # Set offline mode to make sure configure preloaded successfully. - HF_HUB_OFFLINE: 1 - HF_DATASETS_OFFLINE: 1 - TRANSFORMERS_OFFLINE: 1 + # env: + # # Set offline mode to make sure configure preloaded successfully. 
+ # HF_HUB_OFFLINE: 1 + # HF_DATASETS_OFFLINE: 1 + # TRANSFORMERS_OFFLINE: 1 run: > invoke --no-patchmatch --no-nsfw_checker - --model ${{ matrix.stable-diffusion-model }} --from_file ${{ env.TEST_PROMPTS }} - --root="${{ env.INVOKEAI_ROOT }}" - name: Archive results id: archive-results From ec2a509e010cdd51c0539e47a2528209981a8d12 Mon Sep 17 00:00:00 2001 From: mauwii Date: Thu, 19 Jan 2023 03:11:40 +0100 Subject: [PATCH 6/9] make images in README.md compatible to pypi also add missing new-lines before/after headings --- README.md | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 07501a4242..765bed663a 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@
-![project logo](docs/assets/invoke_ai_banner.png) +![project logo](https://github.com/mauwii/InvokeAI/raw/main/docs/assets/invoke_ai_banner.png) # InvokeAI: A Stable Diffusion Toolkit @@ -28,6 +28,7 @@ [latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main [latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github [latest release link]: https://github.com/invoke-ai/InvokeAI/releases +
InvokeAI is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. InvokeAI offers an industry leading Web Interface, interactive Command Line Interface, and also serves as the foundation for multiple commercial products. @@ -38,8 +39,11 @@ _Note: InvokeAI is rapidly evolving. Please use the [Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature requests. Be sure to use the provided templates. They will help us diagnose issues faster._ +
-![canvas preview](docs/assets/canvas_preview.png) +![canvas preview](https://github.com/mauwii/InvokeAI/raw/main/docs/assets/canvas_preview.png) + +
# Getting Started with InvokeAI @@ -81,6 +85,7 @@ instructions, please see: InvokeAI is supported across Linux, Windows and macOS. Linux users can use either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm driver). + #### System You will need one of the following: @@ -104,19 +109,24 @@ to render 512x512 images. Feature documentation can be reviewed by navigating to [the InvokeAI Documentation page](https://invoke-ai.github.io/InvokeAI/features/) -### *Web Server & UI* +### *Web Server & UI* + InvokeAI offers a locally hosted Web Server & React Frontend, with an industry leading user experience. The Web-based UI allows for simple and intuitive workflows, and is responsive for use on mobile devices and tablets accessing the web server. ### *Unified Canvas* + The Unified Canvas is a fully integrated canvas implementation with support for all core generation capabilities, in/outpainting, brush tools, and more. This creative tool unlocks the capability for artists to create with AI as a creative collaborator, and can be used to augment AI-generated imagery, sketches, photography, renders, and more. ### *Advanced Prompt Syntax* + InvokeAI's advanced prompt syntax allows for token weighting, cross-attention control, and prompt blending, allowing for fine-tuned tweaking of your invocations and exploration of the latent space. ### *Command Line Interface* + For users utilizing a terminal-based environment, or who want to take advantage of CLI features, InvokeAI offers an extensive and actively supported command-line interface that provides the full suite of generation functionality available in the tool. ### Other features + - *Support for both ckpt and diffusers models* - *SD 2.0, 2.1 support* - *Noise Control & Tresholding* @@ -126,6 +136,7 @@ For users utilizing a terminal-based environment, or who want to take advantage - *Model Manager & Support* ### Coming Soon + - *Node-Based Architecture & UI* - And more... 
@@ -149,7 +160,7 @@ To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the If you are unfamiliar with how to contribute to GitHub projects, here is a -[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**. +[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**. We hope you enjoy using our software as much as we enjoy creating it, and we hope that some of those of you who are reading this will elect From a491644e563449ebe68ee888dae9ee7c23c5f1ef Mon Sep 17 00:00:00 2001 From: mauwii Date: Thu, 19 Jan 2023 03:12:59 +0100 Subject: [PATCH 7/9] fix dependencies/requirements --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5ae6a9b5ea..8359f14e6d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [build-system] -requires = ["setuptools~=65.5.0", "pip~=22.3.1", "wheel"] +requires = ["setuptools~=65.5", "pip~=22.3", "wheel"] build-backend = "setuptools.build_meta" [project] @@ -56,7 +56,7 @@ dependencies = [ "k-diffusion", # replaceing "k-diffusion @ https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip", "kornia", "npyscreen", - "numpy<1.24,>=1.23", + "numpy~=1.23", "omegaconf", "opencv-python", "picklescan", From fc5fdae5629aeba42f27693746105f5b22f6906e Mon Sep 17 00:00:00 2001 From: mauwii Date: Thu, 19 Jan 2023 03:44:00 +0100 Subject: [PATCH 8/9] update installation instructions --- docs/installation/020_INSTALL_MANUAL.md | 8 ++- docs/installation/060_INSTALL_PATCHMATCH.md | 23 ++++---- docs/installation/INSTALL_MANUAL.md | 59 +-------------------- 3 files changed, 16 insertions(+), 74 deletions(-) 
diff --git a/docs/installation/020_INSTALL_MANUAL.md b/docs/installation/020_INSTALL_MANUAL.md index ead1eb6536..1fad83121f 100644 --- a/docs/installation/020_INSTALL_MANUAL.md +++ b/docs/installation/020_INSTALL_MANUAL.md @@ -119,10 +119,8 @@ manager, please follow these steps: 6. Run PIP - Be sure that the `invokeai` environment is active before doing this: - ```bash - pip install --prefer-binary -r requirements.txt + pip --python invokeai install --use-pep517 . ``` 7. Set up the runtime directory @@ -137,7 +135,7 @@ manager, please follow these steps: default to `invokeai` in your home directory. ```bash - configure_invokeai.py --root_dir ~/Programs/invokeai + configure_invokeai --root_dir ~/Programs/invokeai ``` The script `configure_invokeai.py` will interactively guide you through the @@ -452,7 +450,7 @@ time. Note that this method only works with the PIP method. step. 3. Run one additional step while you are in the source code repository - directory `pip install .` (note the dot at the end). + directory `pip install --use-pep517 .` (note the dot at the end). 4. That's all! Now, whenever you activate the virtual environment, `invoke.py` will know where to look for the runtime directory without diff --git a/docs/installation/060_INSTALL_PATCHMATCH.md b/docs/installation/060_INSTALL_PATCHMATCH.md index 592e1f56c3..989be24731 100644 --- a/docs/installation/060_INSTALL_PATCHMATCH.md +++ b/docs/installation/060_INSTALL_PATCHMATCH.md @@ -18,7 +18,13 @@ Windows systems with no extra intervention. ## Macintosh -PyPatchMatch is not currently supported, but the team is working on it. +You need to have opencv installed so that pypatchmatch can be built: + +```bash +brew install opencv +``` + +The next time you start `invoke`, after successfully installing opencv, pypatchmatch will be built. ## Linux @@ -39,23 +45,16 @@ Prior to installing PyPatchMatch, you need to take the following steps: sudo apt install python3-opencv libopencv-dev ``` -3.
Fix the naming of the `opencv` package configuration file: - - ```sh - cd /usr/lib/x86_64-linux-gnu/pkgconfig/ - ln -sf opencv4.pc opencv.pc - ``` - -4. Activate the environment you use for invokeai, either with `conda` or with a +3. Activate the environment you use for invokeai, either with `conda` or with a virtual environment. -5. Install pypatchmatch: +4. Install pypatchmatch: ```sh - pip install "git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch" + pip install pypatchmatch ``` -6. Confirm that pypatchmatch is installed. At the command-line prompt enter +5. Confirm that pypatchmatch is installed. At the command-line prompt enter `python`, and then at the `>>>` line type `from patchmatch import patch_match`: It should look like the follwing: diff --git a/docs/installation/INSTALL_MANUAL.md b/docs/installation/INSTALL_MANUAL.md index 4d62df9415..f45463c535 100644 --- a/docs/installation/INSTALL_MANUAL.md +++ b/docs/installation/INSTALL_MANUAL.md @@ -254,65 +254,10 @@ steps: source invokeai/bin/activate ``` -4. Pick the correct `requirements*.txt` file for your hardware and operating - system. - - We have created a series of environment files suited for different operating - systems and GPU hardware. They are located in the - `environments-and-requirements` directory: - -
- - | filename | OS | - | :---------------------------------: | :-------------------------------------------------------------: | - | requirements-lin-amd.txt | Linux with an AMD (ROCm) GPU | - | requirements-lin-arm64.txt | Linux running on arm64 systems | - | requirements-lin-cuda.txt | Linux with an NVIDIA (CUDA) GPU | - | requirements-mac-mps-cpu.txt | Macintoshes with MPS acceleration | - | requirements-lin-win-colab-cuda.txt | Windows with an NVIDA (CUDA) GPU
(supports Google Colab too) | - -
- - Select the appropriate requirements file, and make a link to it from - `requirements.txt` in the top-level InvokeAI directory. The command to do - this from the top-level directory is: - - !!! example "" - - === "Macintosh and Linux" - - !!! info "Replace `xxx` and `yyy` with the appropriate OS and GPU codes." - - ```bash - ln -sf environments-and-requirements/requirements-xxx-yyy.txt requirements.txt - ``` - - === "Windows" - - !!! info "on Windows, admin privileges are required to make links, so we use the copy command instead" - - ```cmd - copy environments-and-requirements\requirements-lin-win-colab-cuda.txt requirements.txt - ``` - - !!! warning - - Please do not link or copy `environments-and-requirements/requirements-base.txt`. - This is a base requirements file that does not have the platform-specific - libraries. Also, be sure to link or copy the platform-specific file to - a top-level file named `requirements.txt` as shown here. Running pip on - a requirements file in a subdirectory will not work as expected. - - When this is done, confirm that a file named `requirements.txt` has been - created in the InvokeAI root directory and that it points to the correct - file in `environments-and-requirements`. - -5. Run PIP - - Be sure that the `invokeai` environment is active before doing this: +4. Run PIP ```bash - pip install --prefer-binary -r requirements.txt + pip --python invokeai install --use-pep517 . 
``` --- From d7217e38013dd1b98f73c1f09a3ef5621267c894 Mon Sep 17 00:00:00 2001 From: mauwii Date: Thu, 19 Jan 2023 03:48:23 +0100 Subject: [PATCH 9/9] disable instable CI tests for windows runners therefore enable all pytorch versions to verify installation --- .github/workflows/test-invoke-pip.yml | 29 ++++++++++++++------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/.github/workflows/test-invoke-pip.yml b/.github/workflows/test-invoke-pip.yml index a1a217fd84..a88a2fb883 100644 --- a/.github/workflows/test-invoke-pip.yml +++ b/.github/workflows/test-invoke-pip.yml @@ -27,9 +27,9 @@ jobs: - linux-rocm-5_2 - linux-cpu - macos-default - # - windows-cpu + - windows-cpu - windows-cuda-11_6 - # - windows-cuda-11_7 + - windows-cuda-11_7 include: - pytorch: linux-cuda-11_6 os: ubuntu-22.04 @@ -49,17 +49,17 @@ jobs: - pytorch: macos-default os: macOS-12 github-env: $GITHUB_ENV - # - pytorch: windows-cpu - # os: windows-2022 - # github-env: $env:GITHUB_ENV + - pytorch: windows-cpu + os: windows-2022 + github-env: $env:GITHUB_ENV - pytorch: windows-cuda-11_6 os: windows-2022 extra-index-url: 'https://download.pytorch.org/whl/cu116' github-env: $env:GITHUB_ENV - # - pytorch: windows-cuda-11_7 - # os: windows-2022 - # extra-index-url: 'https://download.pytorch.org/whl/cu117' - # github-env: $env:GITHUB_ENV + - pytorch: windows-cuda-11_7 + os: windows-2022 + extra-index-url: 'https://download.pytorch.org/whl/cu117' + github-env: $env:GITHUB_ENV name: ${{ matrix.pytorch }} on ${{ matrix.python-version }} runs-on: ${{ matrix.os }} steps: @@ -119,12 +119,13 @@ jobs: # can't use fp16 weights without a GPU - name: Run the tests + if: runner.os != 'Windows' id: run-tests - # env: - # # Set offline mode to make sure configure preloaded successfully. - # HF_HUB_OFFLINE: 1 - # HF_DATASETS_OFFLINE: 1 - # TRANSFORMERS_OFFLINE: 1 + env: + # Set offline mode to make sure configure preloaded successfully. 
+ HF_HUB_OFFLINE: 1 + HF_DATASETS_OFFLINE: 1 + TRANSFORMERS_OFFLINE: 1 run: > invoke --no-patchmatch