Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
c238a7f18b
Upgrade pydantic and fastapi to latest.

- pydantic~=2.4.2
- fastapi~=0.103.2
- fastapi-events~=0.9.1

**Big Changes**

There are a number of logic changes needed to support pydantic v2. Most changes are very simple, like using the new methods to serialize and deserialize models, but there are a few more complex changes.

**Invocations**

The biggest change relates to invocation creation, instantiation and validation. Because pydantic v2 moves all validation logic into the rust pydantic-core, we may no longer directly stick our fingers into the validation pie. Previously, we (ab)used models and fields to allow invocation fields to be optional at instantiation, but required when `invoke()` is called. We directly manipulated the fields and invocation models when calling `invoke()`.

With pydantic v2, this is much more involved. Changes to the python wrapper do not propagate down to the rust validation logic - you have to rebuild the model. This causes problems with concurrent access to the invocation classes and is not a free operation.

This logic has been totally refactored and we do not need to change the model any more. The details are in `baseinvocation.py`, in the `InputField` function and the `BaseInvocation.invoke_internal()` method. In the end, this implementation is cleaner.

**Invocation Fields**

In pydantic v2, you can no longer directly add or remove fields from a model. Previously, we did this to add the `type` field to invocations.

**Invocation Decorators**

With pydantic v2, we instead use the imperative `create_model()` API to create a new model with the additional field. This is done in `baseinvocation.py` in the `invocation()` wrapper (a minimal sketch appears at the end of this message). A similar technique is used for `invocation_output()`.

**Minor Changes**

There are a number of minor changes around the pydantic v2 models API.

**Protected `model_` Namespace**

All models' pydantic-provided methods and attributes are prefixed with `model_` and this is considered a protected namespace. This causes some conflict, because "model" means something to us, and we have a ton of pydantic models with attributes starting with "model_". Fortunately, there are no direct conflicts. However, in any pydantic model where we define an attribute or method that starts with "model_", we must set the protected namespaces to an empty tuple.

```py
class IPAdapterModelField(BaseModel):
    model_name: str = Field(description="Name of the IP-Adapter model")
    base_model: BaseModelType = Field(description="Base model")

    model_config = ConfigDict(protected_namespaces=())
```

**Model Serialization**

Pydantic models no longer have `Model.dict()` or `Model.json()`. Instead, we use `Model.model_dump()` or `Model.model_dump_json()`.

**Model Deserialization**

Pydantic models no longer have `Model.parse_obj()` or `Model.parse_raw()`, and there are no `parse_raw_as()` or `parse_obj_as()` functions. Instead, you need to create a `TypeAdapter` object to parse python objects or JSON into a model.

```py
adapter_graph = TypeAdapter(Graph)
deserialized_graph_from_json = adapter_graph.validate_json(graph_json)
deserialized_graph_from_dict = adapter_graph.validate_python(graph_dict)
```

**Field Customisation**

Pydantic `Field`s no longer accept arbitrary args. Now, you must put all additional arbitrary args in a `json_schema_extra` arg on the field.
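As a minimal sketch of what that looks like in practice (the `ResizeSettings` model and the `ui_order` key below are hypothetical, not code from this repo):

```py
from pydantic import BaseModel, Field


class ResizeSettings(BaseModel):
    # pydantic v1 accepted arbitrary kwargs directly, e.g. Field(512, ui_order=1);
    # pydantic v2 requires the extra args to be nested under json_schema_extra.
    width: int = Field(default=512, json_schema_extra={"ui_order": 1})


# The extra key is merged into the generated JSON schema for the field.
print(ResizeSettings.model_json_schema()["properties"]["width"])
```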
**Schema Customisation**

FastAPI and pydantic schema generation now follows the OpenAPI version 3.1 spec. This necessitates two changes:

- Our schema customization logic has been revised
- Schema parsing to build node templates has been revised

The specifics aren't important, but this does present additional surface area for bugs.

**Performance Improvements**

Pydantic v2 is a full rewrite with a rust backend. This offers a substantial performance improvement (pydantic claims 5x to 50x depending on the task). We'll notice this the most during serialization and deserialization of sessions/graphs, which happens very, very often - a couple of times per node. I haven't done any benchmarks, but anecdotally, graph execution is much faster. Also, very large graphs - like those with massive iterators - are much, much faster.
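For reference, a minimal, self-contained sketch of the `create_model()` technique described under **Invocation Decorators** above; `ExampleInvocation` and the `"example"` type value are illustrative only and do not reflect the actual code in `baseinvocation.py`:

```py
from typing import Literal

from pydantic import BaseModel, create_model


class ExampleInvocation(BaseModel):
    prompt: str


# Fields cannot be injected into an existing pydantic v2 model, so we rebuild
# the model, inheriting the original fields and adding a literal "type" field.
ExampleInvocationWithType = create_model(
    "ExampleInvocation",
    __base__=ExampleInvocation,
    type=(Literal["example"], "example"),  # (annotation, default)
)

node = ExampleInvocationWithType(prompt="a cat")
print(node.type)  # -> "example"
```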
267 lines
9.0 KiB
TOML
[build-system]
requires = ["setuptools~=65.5", "pip~=22.3", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "InvokeAI"
description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process"
requires-python = ">=3.9, <3.12"
readme = { content-type = "text/markdown", file = "README.md" }
keywords = ["stable-diffusion", "AI"]
dynamic = ["version"]
license = { file = "LICENSE" }
authors = [{ name = "The InvokeAI Project", email = "lincoln.stein@gmail.com" }]
classifiers = [
  'Development Status :: 4 - Beta',
  'Environment :: GPU',
  'Environment :: GPU :: NVIDIA CUDA',
  'Environment :: MacOS X',
  'Intended Audience :: End Users/Desktop',
  'Intended Audience :: Developers',
  'License :: OSI Approved :: MIT License',
  'Operating System :: POSIX :: Linux',
  'Operating System :: MacOS',
  'Operating System :: Microsoft :: Windows',
  'Programming Language :: Python :: 3 :: Only',
  'Programming Language :: Python :: 3.10',
  'Topic :: Artistic Software',
  'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
  'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
  'Topic :: Multimedia :: Graphics',
  'Topic :: Scientific/Engineering :: Artificial Intelligence',
  'Topic :: Scientific/Engineering :: Image Processing',
]
dependencies = [
  "accelerate~=0.23.0",
  "albumentations",
  "click",
  "clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
  "compel~=2.0.2",
  "controlnet-aux>=0.0.6",
  "timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26
  "datasets",
  # When bumping diffusers beyond 0.21, make sure to address this:
  # https://github.com/invoke-ai/InvokeAI/blob/fc09ab7e13cb7ca5389100d149b6422ace7b8ed3/invokeai/app/invocations/latent.py#L513
  "diffusers[torch]~=0.21.0",
  "dnspython~=2.4.0",
  "dynamicprompts",
  "easing-functions",
  "einops",
  "facexlib",
  "fastapi~=0.103.2",
  "fastapi-events~=0.9.1",
  "huggingface-hub~=0.16.4",
  "invisible-watermark~=0.2.0", # needed to install SDXL base and refiner using their repo_ids
  "matplotlib", # needed for plotting of Penner easing functions
  "mediapipe", # needed for "mediapipeface" controlnet model
  "numpy",
  "npyscreen",
  "omegaconf",
  "onnx",
  "onnxruntime",
  "opencv-python",
  "pydantic~=2.4.2",
  "pydantic-settings~=2.0.3",
  "picklescan",
  "pillow",
  "prompt-toolkit",
  "pympler~=1.0.1",
  "pypatchmatch",
  'pyperclip',
  "pyreadline3",
  "python-multipart",
  "python-socketio",
  "pytorch-lightning",
  "realesrgan",
  "requests~=2.28.2",
  "rich~=13.3",
  "safetensors~=0.4.0",
  "scikit-image~=0.21.0",
  "semver~=3.0.1",
  "send2trash",
  "test-tube~=0.7.5",
  "torch~=2.0.1",
  "torchvision~=0.15.2",
  "torchmetrics~=0.11.0",
  "torchsde~=0.2.5",
  "transformers~=4.31.0",
  "uvicorn[standard]~=0.21.1",
  "windows-curses; sys_platform=='win32'",
]

[project.optional-dependencies]
"dist" = ["pip-tools", "pipdeptree", "twine"]
"docs" = [
  "mkdocs-material<9.0",
  "mkdocs-git-revision-date-localized-plugin",
  "mkdocs-redirects==1.2.0",
]
"dev" = ["jurigged", "pudb"]
"test" = [
  "black",
  "flake8",
  "Flake8-pyproject",
  "isort",
  "mypy",
  "pre-commit",
  "pytest>6.0.0",
  "pytest-cov",
  "pytest-datadir",
]
"xformers" = [
  "xformers~=0.0.19; sys_platform!='darwin'",
  "triton; sys_platform=='linux'",
]
"onnx" = ["onnxruntime"]
"onnx-cuda" = ["onnxruntime-gpu"]
"onnx-directml" = ["onnxruntime-directml"]

[project.scripts]

# legacy entrypoints; provided for backwards compatibility
"configure_invokeai.py" = "invokeai.frontend.install.invokeai_configure:invokeai_configure"
"textual_inversion.py" = "invokeai.frontend.training:invokeai_textual_inversion"

# shortcut commands to start cli and web
# "invokeai --web" will launch the web interface
# "invokeai" will launch the CLI
"invokeai" = "invokeai.frontend.legacy_launch_invokeai:main"

# new shortcut to launch web interface
"invokeai-web" = "invokeai.app.api_app:invoke_api"

# full commands
"invokeai-configure" = "invokeai.frontend.install.invokeai_configure:invokeai_configure"
"invokeai-merge" = "invokeai.frontend.merge:invokeai_merge_diffusers"
"invokeai-ti" = "invokeai.frontend.training:invokeai_textual_inversion"
"invokeai-model-install" = "invokeai.frontend.install.model_install:main"
"invokeai-migrate3" = "invokeai.backend.install.migrate_to_3:main"
"invokeai-update" = "invokeai.frontend.install.invokeai_update:main"
"invokeai-metadata" = "invokeai.backend.image_util.invoke_metadata:main"
"invokeai-node-cli" = "invokeai.app.cli_app:invoke_cli"
"invokeai-node-web" = "invokeai.app.api_app:invoke_api"
"invokeai-import-images" = "invokeai.frontend.install.import_images:main"
"invokeai-db-maintenance" = "invokeai.backend.util.db_maintenance:main"

[project.urls]
"Homepage" = "https://invoke-ai.github.io/InvokeAI/"
"Documentation" = "https://invoke-ai.github.io/InvokeAI/"
"Source" = "https://github.com/invoke-ai/InvokeAI/"
"Bug Reports" = "https://github.com/invoke-ai/InvokeAI/issues"
"Discord" = "https://discord.gg/ZmtBAhwWhy"

[tool.setuptools.dynamic]
version = { attr = "invokeai.version.__version__" }

[tool.setuptools.packages.find]
"where" = ["."]
"include" = [
  "invokeai.assets.fonts*",
  "invokeai.version*",
  "invokeai.generator*",
  "invokeai.backend*",
  "invokeai.frontend*",
  "invokeai.frontend.web.dist*",
  "invokeai.frontend.web.static*",
  "invokeai.configs*",
  "invokeai.app*",
]

[tool.setuptools.package-data]
"invokeai.assets.fonts" = ["**/*.ttf"]
"invokeai.backend" = ["**.png"]
"invokeai.configs" = ["*.example", "**/*.yaml", "*.txt"]
"invokeai.frontend.web.dist" = ["**"]
"invokeai.frontend.web.static" = ["**"]

#=== Begin: PyTest and Coverage
[tool.pytest.ini_options]
addopts = "--cov-report term --cov-report html --cov-report xml --strict-markers -m \"not slow\""
markers = [
  "slow: Marks tests as slow. Disabled by default. To run all tests, use -m \"\". To run only slow tests, use -m \"slow\".",
]
[tool.coverage.run]
branch = true
source = ["invokeai"]
omit = ["*tests*", "*migrations*", ".venv/*", "*.env"]
[tool.coverage.report]
show_missing = true
fail_under = 85 # let's set something sensible on Day 1 ...
[tool.coverage.json]
output = "coverage/coverage.json"
pretty_print = true
[tool.coverage.html]
directory = "coverage/html"
[tool.coverage.xml]
output = "coverage/index.xml"
#=== End: PyTest and Coverage

[tool.flake8]
max-line-length = 120
ignore = ["E203", "E266", "E501", "W503"]
select = ["B", "C", "E", "F", "W", "T4"]
exclude = [
  ".git",
  "__pycache__",
  "build",
  "dist",
  "invokeai/frontend/web/node_modules/",
]

[tool.black]
line-length = 120

[tool.isort]
profile = "black"
line_length = 120

[tool.mypy]
ignore_missing_imports = true # ignores missing types in third-party libraries

[[tool.mypy.overrides]]
follow_imports = "skip"
module = [
  "invokeai.app.api.routers.models",
  "invokeai.app.invocations.compel",
  "invokeai.app.invocations.latent",
  "invokeai.app.services.config.config_base",
  "invokeai.app.services.config.config_default",
  "invokeai.app.services.invocation_stats.invocation_stats_default",
  "invokeai.app.services.model_manager.model_manager_base",
  "invokeai.app.services.model_manager.model_manager_default",
  "invokeai.app.util.controlnet_utils",
  "invokeai.backend.image_util.txt2mask",
  "invokeai.backend.image_util.safety_checker",
  "invokeai.backend.image_util.patchmatch",
  "invokeai.backend.image_util.invisible_watermark",
  "invokeai.backend.install.model_install_backend",
  "invokeai.backend.ip_adapter.ip_adapter",
  "invokeai.backend.ip_adapter.resampler",
  "invokeai.backend.ip_adapter.unet_patcher",
  "invokeai.backend.model_management.convert_ckpt_to_diffusers",
  "invokeai.backend.model_management.lora",
  "invokeai.backend.model_management.model_cache",
  "invokeai.backend.model_management.model_manager",
  "invokeai.backend.model_management.model_merge",
  "invokeai.backend.model_management.model_probe",
  "invokeai.backend.model_management.model_search",
  "invokeai.backend.model_management.models.*", # this is needed to ignore the module's `__init__.py`
  "invokeai.backend.model_management.models.base",
  "invokeai.backend.model_management.models.controlnet",
  "invokeai.backend.model_management.models.ip_adapter",
  "invokeai.backend.model_management.models.lora",
  "invokeai.backend.model_management.models.sdxl",
  "invokeai.backend.model_management.models.stable_diffusion",
  "invokeai.backend.model_management.models.vae",
  "invokeai.backend.model_management.seamless",
  "invokeai.backend.model_management.util",
  "invokeai.backend.stable_diffusion.diffusers_pipeline",
  "invokeai.backend.stable_diffusion.diffusion.cross_attention_control",
  "invokeai.backend.stable_diffusion.diffusion.shared_invokeai_diffusion",
  "invokeai.backend.util.hotfixes",
  "invokeai.backend.util.logging",
  "invokeai.backend.util.mps_fixes",
  "invokeai.backend.util.util",
  "invokeai.frontend.install.model_install",
]