replace legacy setup.py with pyproject.toml
Other changes which were required:
- move configure_invokeai.py into ldm.invoke
- update files which imported configure_invokeai to use the new location:
  - ldm/invoke/CLI.py
  - scripts/load_models.py
  - scripts/preload_models.py
- update test-invoke-pip.yml:
  - remove pr type "converted_to_draft"
  - remove reference to dev/diffusers
  - remove no-longer-needed requirements from the matrix
  - add pytorch to the matrix
  - install via `pip3 install --use-pep517 .`
  - use the created executables
  - this should also fix configure_invokeai not being executed on Windows

To install, use `pip install --use-pep517 -e .`, where `-e` is optional.
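As a quick sanity check after `pip install --use-pep517 .`, the console scripts declared in the new pyproject.toml should show up as registered entry points. A minimal, illustrative sketch (assumes Python 3.10+, since the `group=` filter of `importlib.metadata.entry_points` was added in 3.10; the prefix filter is only for display and is not part of the project):

```python
# Illustrative check that the console scripts declared in pyproject.toml were installed.
from importlib.metadata import entry_points

# Python 3.10+: entry_points() accepts a group filter.
for ep in entry_points(group="console_scripts"):
    # the InvokeAI entry points all target ldm.invoke or scripts
    if ep.value.startswith(("ldm.invoke", "scripts")):
        print(f"{ep.name} -> {ep.value}")
```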
This commit is contained in: parent f3ff386491, commit 5afb63e41b
.github/workflows/test-invoke-pip.yml (vendored, 60 changed lines)
@@ -8,47 +8,49 @@ on:
- 'ready_for_review'
- 'opened'
- 'synchronize'
- 'converted_to_draft'

concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true

jobs:
# fail_if_pull_request_is_draft:
# if: github.event.pull_request.draft == true && github.head_ref != 'dev/diffusers'
# runs-on: ubuntu-18.04
# steps:
# - name: Fails in order to indicate that pull request needs to be marked as ready to review and unit tests workflow needs to pass.
# run: exit 1
matrix:
if: github.event.pull_request.draft == false || github.head_ref == 'dev/diffusers'
if: github.event.pull_request.draft == false
strategy:
matrix:
stable-diffusion-model:
- stable-diffusion-1.5
requirements-file:
- requirements-lin-cuda.txt
- requirements-lin-amd.txt
- requirements-mac-mps-cpu.txt
- requirements-win-colab-cuda.txt
python-version:
# - '3.9'
- '3.10'
pytorch:
- linux-cuda-11_7
- linux-rocm-5_2
# - linux-cpu
- macos-default
# - windows-cpu
- windows-cuda-11_7
include:
- requirements-file: requirements-lin-cuda.txt
- pytorch: linux-cuda-11_7
os: ubuntu-22.04
github-env: $GITHUB_ENV
- requirements-file: requirements-lin-amd.txt
- pytorch: linux-rocm-5_2
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
github-env: $GITHUB_ENV
- requirements-file: requirements-mac-mps-cpu.txt
# - pytorch: linux-cpu
# os: ubuntu-22.04
# extra-index-url: 'https://download.pytorch.org/whl/cpu'
- pytorch: macos
os: macOS-12
github-env: $GITHUB_ENV
- requirements-file: requirements-win-colab-cuda.txt
# - pytorch: windows-cpu
# os: windows-2022
- pytorch: windows-cuda-11_7
os: windows-2022
extra-index-url: 'https://download.pytorch.org/whl/cu117'
github-env: $env:GITHUB_ENV
name: ${{ matrix.requirements-file }} on ${{ matrix.python-version }}
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
runs-on: ${{ matrix.os }}
env:
INVOKE_MODEL_RECONFIGURE: '--yes'
@@ -96,28 +98,22 @@ jobs:
if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}

- name: create requirements.txt
run: cp 'environments-and-requirements/${{ matrix.requirements-file }}' '${{ matrix.requirements-file }}'

- name: setup python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
# cache: 'pip'
# cache-dependency-path: ${{ matrix.requirements-file }}

- name: install dependencies
run: pip3 install --upgrade pip setuptools wheel
- name: install invokeai
run: pip3 install --use-pep517 .
env:
PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}

- name: install requirements
run: pip3 install -r '${{ matrix.requirements-file }}'

- name: run configure_invokeai.py
- name: run configure_invokeai
id: run-preload-models
env:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGINGFACE_TOKEN }}
run: >
configure_invokeai.py
configure_invokeai
--yes
--default_only
--full-precision # can't use fp16 weights without a GPU
@@ -131,7 +127,7 @@ jobs:
HF_DATASETS_OFFLINE: 1
TRANSFORMERS_OFFLINE: 1
run: >
python3 scripts/invoke.py
invoke
--no-patchmatch
--no-nsfw_checker
--model ${{ matrix.stable-diffusion-model }}
@@ -144,5 +140,5 @@ jobs:
if: matrix.os != 'windows-2022'
uses: actions/upload-artifact@v3
with:
name: results_${{ matrix.requirements-file }}_${{ matrix.python-version }}
name: results_${{ matrix.pytorch }}_${{ matrix.python-version }}
path: ${{ env.INVOKEAI_ROOT }}/outputs
ldm/invoke/CLI.py
@@ -485,7 +485,7 @@ def do_command(command:str, gen, opt:Args, completer) -> tuple:
optimize_model(path[1], gen, opt, completer)
completer.add_history(command)
operation = None

elif command.startswith('!optimize'):
path = shlex.split(command)
@@ -570,11 +570,11 @@ def import_model(model_path:str, gen, opt, completer):
(3) a huggingface repository id
'''
model_name = None

if model_path.startswith(('http:','https:','ftp:')):
model_name = import_ckpt_model(model_path, gen, opt, completer)
elif os.path.exists(model_path) and model_path.endswith('.ckpt') and os.path.isfile(model_path):
model_name = import_ckpt_model(model_path, gen, opt, completer)
model_name = import_ckpt_model(model_path, gen, opt, completer)
elif re.match('^[\w.+-]+/[\w.+-]+$',model_path):
model_name = import_diffuser_model(model_path, gen, opt, completer)
elif os.path.isdir(model_path):
@@ -584,12 +584,12 @@ def import_model(model_path:str, gen, opt, completer):
if not model_name:
return

if not _verify_load(model_name, gen):
print('** model failed to load. Discarding configuration entry')
gen.model_manager.del_model(model_name)
return

if input('Make this the default model? [n] ') in ('y','Y'):
gen.model_manager.set_default_model(model_name)
@@ -690,7 +690,7 @@ def optimize_model(model_name_or_path:str, gen, opt, completer):
else:
print(f'** {model_name_or_path} is neither an existing model nor the path to a .ckpt file')
return

if not ckpt_path.is_absolute():
ckpt_path = Path(Globals.root,ckpt_path)
@@ -698,7 +698,7 @@ def optimize_model(model_name_or_path:str, gen, opt, completer):
if diffuser_path.exists():
print(f'** {model_name_or_path} is already optimized. Will not overwrite. If this is an error, please remove the directory {diffuser_path} and try again.')
return

new_config = gen.model_manager.convert_and_import(
ckpt_path,
diffuser_path,
@@ -747,7 +747,7 @@ def edit_model(model_name:str, gen, opt, completer):
continue
completer.set_line(info[attribute])
info[attribute] = input(f'{attribute}: ') or info[attribute]

if new_name != model_name:
manager.del_model(model_name)
@@ -1099,7 +1099,7 @@ def report_model_error(opt:Namespace, e:Exception):
if yes_to_all is not None:
sys.argv.append(yes_to_all)

import configure_invokeai
import ldm.invoke.configure_invokeai as configure_invokeai
configure_invokeai.main()
print('** InvokeAI will now restart')
sys.argv = previous_args
pyproject.toml (new file, 130 lines)
@@ -0,0 +1,130 @@
[build-system]
requires = ["setuptools~=65.5.0", "pip~=22.3.1", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "InvokeAI"
description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process"
requires-python = ">=3.9, <3.11"
readme = { content-type = "text/markdown", file = "README.md" }
keywords = ["stable-diffusion", "AI"]
dynamic = ["version"]
license = { file = "LICENSE" }
authors = [{ name = "The InvokeAI Project", email = "lincoln.stein@gmail.com" }]
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: GPU',
'Environment :: GPU :: NVIDIA CUDA',
'Environment :: MacOS X',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Artistic Software',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
'Topic :: Multimedia :: Graphics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Image Processing',
]
dependencies = [
"accelerate",
"albumentations",
"clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
"clipseg @ https://github.com/invoke-ai/clipseg/archive/relaxed-python-requirement.zip", # is this still necessary with diffusers?
"datasets",
"diffusers[torch]~=0.11",
"dnspython==2.2.1",
"einops",
"eventlet",
"facexlib",
"flask==2.1.3",
"flask_cors==3.0.10",
"flask_socketio==5.3.0",
"flaskwebgui==1.0.3",
"getpass_asterisk",
"gfpgan==1.3.8",
"huggingface-hub>=0.11.1",
"imageio",
"imageio-ffmpeg",
"k-diffusion", # replacing "k-diffusion @ https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip",
"kornia",
"npyscreen",
"numpy<1.24,>=1.23",
"omegaconf",
"opencv-python",
"picklescan",
"pillow",
"pudb",
"pypatchmatch",
"pyreadline3",
"pytorch-lightning==1.7.7",
"realesrgan",
"requests==2.25.1",
"safetensors",
"scikit-image>=0.19",
"send2trash",
"streamlit",
"taming-transformers-rom1504",
"test-tube>=0.7.5",
"torch>=1.13.1",
"torch-fidelity",
"torchvision>=0.14.1",
"torchmetrics",
"transformers~=4.25",
"windows-curses; sys_platform=='win32'",
]

[project.optional-dependencies]
"dist" = ["pip-tools", "pipdeptree", "twine"]
"docs" = [
"mkdocs-material<9.0",
"mkdocs-git-revision-date-localized-plugin",
"mkdocs-redirects==1.2.0",
]
test = ["pytest>6.0.0", "pytest-cov"]

[project.scripts]
"configure_invokeai" = "ldm.invoke.configure_invokeai:main"
"dream" = "ldm.invoke:CLI.main"
"invoke" = "ldm.invoke:CLI.main"
"legacy_api" = "scripts:legacy_api.main"
"load_models" = "scripts:configure_invokeai.main"
"merge_embeddings" = "scripts:merge_embeddings.main"
"preload_models" = "ldm.invoke.configure_invokeai:main"
[project.urls]
"Homepage" = "https://invoke-ai.github.io/InvokeAI/"
"Documentation" = "https://invoke-ai.github.io/InvokeAI/"
"Source" = "https://github.com/invoke-ai/InvokeAI/"
"Bug Reports" = "https://github.com/invoke-ai/InvokeAI/issues"
"Discord" = "https://discord.gg/ZmtBAhwWhy"

[tool.setuptools.dynamic]
version = { attr = "ldm.invoke.__version__" }
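The `attr` lookup above only works because the package exposes `__version__` at import time; per the old setup.py, the canonical number lives in ldm/invoke/_version.py and is re-exported by the ldm.invoke package. An illustrative check, after `pip install --use-pep517 .`, that the packaged metadata and the in-package attribute agree:

```python
# Illustrative check that the dynamic version wired up via attr = "ldm.invoke.__version__"
# matches the version recorded in the installed distribution's metadata.
from importlib.metadata import version

import ldm.invoke

print(ldm.invoke.__version__)   # value re-exported from ldm/invoke/_version.py
print(version("InvokeAI"))      # value recorded by pip at install time
```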
[tool.setuptools.packages.find]
"where" = ["."]
"include" = ["assets", "backend*", "configs*", "frontend.dist*", "ldm*"]

[tool.setuptools.package-data]
"assets" = ["caution.png"]
"backend" = ["**.png"]
"configs" = ["*.example", "**/*.yaml", "*.txt"]
"frontend.dist" = ["**"]

[tool.setuptools.exclude-package-data]
configs = ["models.yaml"]

[tool.pytest.ini_options]
minversion = "6.0"
addopts = "-p pytest_cov --junitxml=junit/test-results.xml --cov=./ldm/ --cov=./backend --cov-branch"
python_files = ["test_*.py"]
pythonpath = [".venv/lib/python3.9", ".venv/lib/python3.10"]
testpaths = ["tests"]
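With this configuration, invoking pytest from the repository root is enough; the coverage and JUnit options come from `addopts`. An illustrative programmatic equivalent (assumes `pytest` and `pytest-cov` from the `test` extra are installed):

```python
# Illustrative: run the suite with the options declared in [tool.pytest.ini_options].
import sys

import pytest

if __name__ == "__main__":
    # pytest reads pyproject.toml automatically when invoked from the repo root
    sys.exit(pytest.main())
```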
scripts/load_models.py
@@ -5,7 +5,7 @@
# two machines must share a common .cache directory.

import warnings
import configure_invokeai
import ldm.invoke.configure_invokeai as configure_invokeai

if __name__ == '__main__':
    configure_invokeai.main()

scripts/preload_models.py
@@ -5,7 +5,7 @@
# two machines must share a common .cache directory.

import warnings
import configure_invokeai
import ldm.invoke.configure_invokeai as configure_invokeai

if __name__ == '__main__':
    configure_invokeai.main()
setup.py (deleted, 99 lines)
@@ -1,99 +0,0 @@
import sys
import os
import re
from setuptools import setup, find_packages

def list_files(directory):
listing = list()
for root, dirs, files in os.walk(directory,topdown=False):
pair = (root,[os.path.join(root,f) for f in files])
listing.append(pair)
return listing

def get_version()->str:
from ldm.invoke import __version__ as version
return version

# The canonical version number is stored in the file ldm/invoke/_version.py
VERSION = get_version()
DESCRIPTION = ('An implementation of Stable Diffusion which provides various new features'
' and options to aid the image generation process')
LONG_DESCRIPTION = ('This version of Stable Diffusion features a slick WebGUI, an'
' interactive command-line script that combines text2img and img2img'
' functionality in a "dream bot" style interface, and multiple features'
' and other enhancements.')
HOMEPAGE = 'https://github.com/invoke-ai/InvokeAI'
FRONTEND_FILES = list_files('frontend/dist')
FRONTEND_FILES.append(('assets',['assets/caution.png']))
print(FRONTEND_FILES)

REQUIREMENTS=[
'accelerate',
'albumentations',
'diffusers',
'eventlet',
'flask_cors',
'flask_socketio',
'flaskwebgui',
'getpass_asterisk',
'imageio-ffmpeg',
'pyreadline3',
'realesrgan',
'send2trash',
'streamlit',
'taming-transformers-rom1504',
'test-tube',
'torch-fidelity',
'torch',
'torchvision',
'transformers',
'picklescan',
'clip',
'clipseg',
'gfpgan',
'k-diffusion',
'pypatchmatch',
]

setup(
name='InvokeAI',
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author='The InvokeAI Project',
author_email='lincoln.stein@gmail.com',
url=HOMEPAGE,
license='MIT',
packages=find_packages(exclude=['tests.*']),
install_requires=REQUIREMENTS,
dependency_links=['https://download.pytorch.org/whl/torch_stable.html'],
python_requires='>=3.9, <4',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: GPU',
'Environment :: GPU :: NVIDIA CUDA',
'Environment :: MacOS X',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 3 :: Only,'
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Artistic Software',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
'Topic :: Multimedia :: Graphics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Image Processing',
],
scripts = ['scripts/invoke.py','scripts/configure_invokeai.py', 'scripts/sd-metadata.py',
'scripts/preload_models.py', 'scripts/images2prompt.py','scripts/merge_embeddings.py',
'scripts/textual_inversion_fe.py','scripts/textual_inversion.py'
],
data_files=FRONTEND_FILES,
)