[build-system]
requires = ["setuptools~=65.5", "pip~=22.3", "wheel"]
build-backend = "setuptools.build_meta"
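# With this backend, pip drives the build through setuptools.build_meta
# (PEP 517) using the pins above; for development, an editable install is
# the usual route, e.g.:
#   pip install -e .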

[project]
name = "InvokeAI"
description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process"
requires-python = ">=3.9, <3.11"
readme = { content-type = "text/markdown", file = "README.md" }
keywords = ["stable-diffusion", "AI"]
dynamic = ["version"]
license = { file = "LICENSE" }
authors = [{ name = "The InvokeAI Project", email = "lincoln.stein@gmail.com" }]
classifiers = [
  "Development Status :: 4 - Beta",
  "Environment :: GPU",
  "Environment :: GPU :: NVIDIA CUDA",
  "Environment :: MacOS X",
  "Intended Audience :: End Users/Desktop",
  "Intended Audience :: Developers",
  "License :: OSI Approved :: MIT License",
  "Operating System :: POSIX :: Linux",
  "Operating System :: MacOS",
  "Operating System :: Microsoft :: Windows",
  "Programming Language :: Python :: 3 :: Only",
  "Programming Language :: Python :: 3.9",
  "Programming Language :: Python :: 3.10",
  "Topic :: Artistic Software",
  "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
  "Topic :: Internet :: WWW/HTTP :: WSGI :: Server",
  "Topic :: Multimedia :: Graphics",
  "Topic :: Scientific/Engineering :: Artificial Intelligence",
  "Topic :: Scientific/Engineering :: Image Processing",
]

dependencies = [
  "accelerate~=0.16",
  "albumentations",
  "click",
  "clip_anytorch", # replaces "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip"
  "compel>=1.2.1",
  "controlnet-aux>=0.0.6",
  "timm==0.6.13", # pin needed to override the newer timm pulled in by controlnet_aux; see https://github.com/isl-org/ZoeDepth/issues/26
  "datasets",
  "diffusers[torch]~=0.17.1",
  "dnspython==2.2.1",
  "dynamicprompts",
  "easing-functions",
  "einops",
  "eventlet",
  "facexlib",
  "fastapi==0.88.0",
  "fastapi-events==0.8.0",
  "fastapi-socketio==0.0.10",
  "flask==2.1.3",
  "flask_cors==3.0.10",
  "flask_socketio==5.3.0",
  "flaskwebgui==1.0.3",
  "gfpgan==1.3.8",
  "huggingface-hub>=0.11.1",
  "matplotlib", # needed for plotting of Penner easing functions
  "mediapipe", # needed for the "mediapipeface" controlnet model
  "npyscreen",
  "numpy<1.24",
  "omegaconf",
  "opencv-python",
  "picklescan",
  "pillow",
  "prompt-toolkit",
  "pympler==1.0.1",
  "pypatchmatch",
  "pyperclip",
  "pyreadline3",
  "python-multipart==0.0.6",
  "pytorch-lightning==1.7.7",
  "realesrgan",
  "requests==2.28.2",
  "rich~=13.3",
  "safetensors~=0.3.0",
  "scikit-image>=0.19",
  "send2trash",
  "test-tube>=0.7.5",
  "torch~=2.0.0",
  "torchvision>=0.14.1",
  "torchmetrics",
  "torchsde==0.2.5",
  "transformers~=4.30",
  "uvicorn[standard]==0.21.1",
  "windows-curses; sys_platform=='win32'",
]
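# All of the above resolve at install time; a plain "pip install ." from a
# source checkout (or "pip install InvokeAI" from PyPI) pulls in the full set.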

[project.optional-dependencies]
"dist" = ["pip-tools", "pipdeptree", "twine"]
"docs" = [
  "mkdocs-material<9.0",
  "mkdocs-git-revision-date-localized-plugin",
  "mkdocs-redirects==1.2.0",
]
"dev" = [
  "pudb",
]
"test" = ["pytest>6.0.0", "pytest-cov"]
"xformers" = [
  "xformers~=0.0.19; sys_platform!='darwin'",
  "triton; sys_platform=='linux'",
]
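# Extras combine freely at install time; for example, a Linux developer with
# an NVIDIA GPU might run (from a source checkout):
#   pip install ".[xformers,dev,test]"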

[project.scripts]

# legacy entrypoints; provided for backwards compatibility
"configure_invokeai.py" = "invokeai.frontend.install:invokeai_configure"
"textual_inversion.py" = "invokeai.frontend.training:invokeai_textual_inversion"

# shortcut commands to start cli and web
"invokeai" = "invokeai.app.cli_app:invoke_cli"
"invokeai-web" = "invokeai.app.api_app:invoke_api"

# full commands
"invokeai-configure" = "invokeai.frontend.install:invokeai_configure"
"invokeai-merge" = "invokeai.frontend.merge:invokeai_merge_diffusers"
"invokeai-ti" = "invokeai.frontend.training:invokeai_textual_inversion"
"invokeai-model-install" = "invokeai.frontend.install:invokeai_model_install"
"invokeai-migrate3" = "invokeai.backend.install.migrate_to_3:main"
"invokeai-update" = "invokeai.frontend.install:invokeai_update"
"invokeai-metadata" = "invokeai.frontend.CLI.sd_metadata:print_metadata"
"invokeai-node-cli" = "invokeai.app.cli_app:invoke_cli"
"invokeai-node-web" = "invokeai.app.api_app:invoke_api"

[project.urls]
"Homepage" = "https://invoke-ai.github.io/InvokeAI/"
"Documentation" = "https://invoke-ai.github.io/InvokeAI/"
"Source" = "https://github.com/invoke-ai/InvokeAI/"
"Bug Reports" = "https://github.com/invoke-ai/InvokeAI/issues"
"Discord" = "https://discord.gg/ZmtBAhwWhy"

[tool.setuptools.dynamic]
version = { attr = "invokeai.version.__version__" }
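# This satisfies dynamic = ["version"] in [project]: at build time setuptools
# imports invokeai.version and reads its __version__ attribute, keeping the
# version string in a single place in the source tree.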

[tool.setuptools.packages.find]
"where" = ["."]
"include" = [
  "invokeai.assets.web*", "invokeai.version*",
"invokeai.generator*","invokeai.backend*",
|
2023-03-03 05:02:15 +00:00
|
|
|
"invokeai.frontend*", "invokeai.frontend.web.dist*",
|
2023-05-22 20:48:17 +00:00
|
|
|
"invokeai.frontend.web.static*",
|
2023-03-03 05:02:15 +00:00
|
|
|
"invokeai.configs*",
|
|
|
|
"invokeai.app*","ldm*",
]
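# find.include is an allowlist of glob patterns matched against discovered
# package names; a trailing "*" (e.g. "invokeai.backend*") keeps the package
# and all of its subpackages, while unmatched packages stay out of the wheel.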

[tool.setuptools.package-data]
"invokeai.assets.web" = ["**.png"]
"invokeai.backend" = ["**.png"]
"invokeai.configs" = ["*.example", "**/*.yaml", "*.txt"]
"invokeai.frontend.web.dist" = ["**"]
"invokeai.frontend.web.static" = ["**"]

#=== Begin: PyTest and Coverage
[tool.pytest.ini_options]
addopts = "--cov-report term --cov-report html --cov-report xml"
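# Note: pytest-cov only activates when --cov is passed on the command line,
# so a typical invocation is:
#   pytest --cov=invokeai
# The [tool.coverage.*] tables below then control what is measured and where
# the term/html/xml reports land.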

[tool.coverage.run]
branch = true
source = ["invokeai"]
omit = ["*tests*", "*migrations*", ".venv/*", "*.env"]

[tool.coverage.report]
show_missing = true
fail_under = 85 # let's set something sensible on Day 1 ...

[tool.coverage.json]
output = "coverage/coverage.json"
pretty_print = true

[tool.coverage.html]
directory = "coverage/html"

[tool.coverage.xml]
output = "coverage/index.xml"

#=== End: PyTest and Coverage

# flake8 does not read pyproject.toml natively; this table takes effect only
# with the pyproject-flake8 (pflake8) plugin, which expects [tool.flake8].
[tool.flake8]
max-line-length = 120