[build-system]
requires = ["setuptools~=65.5", "pip~=22.3", "wheel"]
build-backend = "setuptools.build_meta"
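# Local build sketch (assuming the PyPA `build` frontend is installed):
#   python -m pip install build
#   python -m build   # resolves the requires above, then emits sdist + wheel into dist/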

[project]
name = "InvokeAI"
description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process"
requires-python = ">=3.9, <3.12"
readme = { content-type = "text/markdown", file = "README.md" }
keywords = ["stable-diffusion", "AI"]
dynamic = ["version"]
license = { file = "LICENSE" }
authors = [{ name = "The InvokeAI Project", email = "lincoln.stein@gmail.com" }]
classifiers = [
  'Development Status :: 4 - Beta',
  'Environment :: GPU',
  'Environment :: GPU :: NVIDIA CUDA',
  'Environment :: MacOS X',
  'Intended Audience :: End Users/Desktop',
  'Intended Audience :: Developers',
  'License :: OSI Approved :: MIT License',
  'Operating System :: POSIX :: Linux',
  'Operating System :: MacOS',
  'Operating System :: Microsoft :: Windows',
  'Programming Language :: Python :: 3 :: Only',
  'Programming Language :: Python :: 3.10',
  'Topic :: Artistic Software',
  'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
  'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
  'Topic :: Multimedia :: Graphics',
  'Topic :: Scientific/Engineering :: Artificial Intelligence',
  'Topic :: Scientific/Engineering :: Image Processing',
]
dependencies = [
  "accelerate~=0.23.0",
  "albumentations",
  "click",
  "clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
  "compel~=2.0.2",
  "controlnet-aux>=0.0.6",
  "timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26
  "datasets",
  # When bumping diffusers beyond 0.21, make sure to address this:
  # https://github.com/invoke-ai/InvokeAI/blob/fc09ab7e13cb7ca5389100d149b6422ace7b8ed3/invokeai/app/invocations/latent.py#L513
  "diffusers[torch]~=0.21.0",
  "dnspython~=2.4.0",
  "dynamicprompts",
  "easing-functions",
  "einops",
  "facexlib",
  "fastapi==0.88.0",
  "fastapi-events==0.8.0",
  "huggingface-hub~=0.16.4",
  "invisible-watermark~=0.2.0", # needed to install SDXL base and refiner using their repo_ids
  "matplotlib", # needed for plotting of Penner easing functions
  "mediapipe", # needed for "mediapipeface" controlnet model
  "numpy",
  "npyscreen",
  "omegaconf",
  "onnx",
  "onnxruntime",
  "opencv-python",
  "pydantic==1.*",
  "picklescan",
  "pillow",
  "prompt-toolkit",
  "pympler~=1.0.1",
  "pypatchmatch",
  "pyperclip",
  "pyreadline3",
  "python-multipart",
  "python-socketio",
  "pytorch-lightning",
  "realesrgan",
  "requests~=2.28.2",
  "rich~=13.3",
  "safetensors~=0.4.0",
  "scikit-image~=0.21.0",
  "semver~=3.0.1",
  "send2trash",
  "test-tube~=0.7.5",
  "torch~=2.0.1",
  "torchvision~=0.15.2",
  "torchmetrics~=0.11.0",
  "torchsde~=0.2.5",
  "transformers~=4.31.0",
  "uvicorn[standard]~=0.21.1",
  "windows-curses; sys_platform=='win32'",
]
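
# Install sketch: from a source checkout, `pip install .` resolves the pinned
# dependencies above; contributors typically use `pip install -e .` for an
# editable install.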

[project.optional-dependencies]
"dist" = ["pip-tools", "pipdeptree", "twine"]
"docs" = [
  "mkdocs-material<9.0",
  "mkdocs-git-revision-date-localized-plugin",
  "mkdocs-redirects==1.2.0",
]
"dev" = [
  "jurigged",
  "pudb",
]
"test" = [
  "black",
  "flake8",
  "Flake8-pyproject",
  "isort",
  "pre-commit",
  "pytest>6.0.0",
  "pytest-cov",
  "pytest-datadir",
]
"xformers" = [
  "xformers~=0.0.19; sys_platform!='darwin'",
  "triton; sys_platform=='linux'",
]
"onnx" = [
  "onnxruntime",
]
"onnx-cuda" = [
  "onnxruntime-gpu",
]
"onnx-directml" = [
  "onnxruntime-directml",
]
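
# Extras sketch: optional groups are selected with pip's extras syntax, e.g.
#   pip install ".[xformers]"    # memory-efficient attention (non-macOS)
#   pip install ".[test,dev]"    # lint/test tooling plus debugging helpers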

[project.scripts]

# legacy entrypoints; provided for backwards compatibility
|
2023-08-09 18:55:02 +00:00
|
|
|
"configure_invokeai.py" = "invokeai.frontend.install.invokeai_configure:invokeai_configure"
|
2023-03-03 06:02:00 +00:00
|
|
|
"textual_inversion.py" = "invokeai.frontend.training:invokeai_textual_inversion"
|
2023-01-28 21:56:14 +00:00
|
|
|
|
2023-05-16 05:50:01 +00:00
|
|
|
# shortcut commands to start cli and web
|
2023-07-07 14:07:15 +00:00
|
|
|
# "invokeai --web" will launch the web interface
|
|
|
|
# "invokeai" will launch the CLI
|
|
|
|
"invokeai" = "invokeai.frontend.legacy_launch_invokeai:main"

# new shortcut to launch web interface
"invokeai-web" = "invokeai.app.api_app:invoke_api"

# full commands
"invokeai-configure" = "invokeai.frontend.install.invokeai_configure:invokeai_configure"
"invokeai-merge" = "invokeai.frontend.merge:invokeai_merge_diffusers"
"invokeai-ti" = "invokeai.frontend.training:invokeai_textual_inversion"
"invokeai-model-install" = "invokeai.frontend.install.model_install:main"
"invokeai-migrate3" = "invokeai.backend.install.migrate_to_3:main"
"invokeai-update" = "invokeai.frontend.install.invokeai_update:main"
"invokeai-metadata" = "invokeai.backend.image_util.invoke_metadata:main"
"invokeai-node-cli" = "invokeai.app.cli_app:invoke_cli"
"invokeai-node-web" = "invokeai.app.api_app:invoke_api"
"invokeai-import-images" = "invokeai.frontend.install.import_images:main"
"invokeai-db-maintenance" = "invokeai.backend.util.db_maintenance:main"

[project.urls]
"Homepage" = "https://invoke-ai.github.io/InvokeAI/"
"Documentation" = "https://invoke-ai.github.io/InvokeAI/"
"Source" = "https://github.com/invoke-ai/InvokeAI/"
"Bug Reports" = "https://github.com/invoke-ai/InvokeAI/issues"
"Discord" = "https://discord.gg/ZmtBAhwWhy"

[tool.setuptools.dynamic]
version = { attr = "invokeai.version.__version__" }
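# Version sketch: setuptools imports the attribute above at build time, i.e.
# invokeai/version exposes something like (value hypothetical):
#   __version__ = "3.0.0"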

[tool.setuptools.packages.find]
"where" = ["."]
"include" = [
  "invokeai.assets.fonts*", "invokeai.version*",
  "invokeai.generator*", "invokeai.backend*",
  "invokeai.frontend*", "invokeai.frontend.web.dist*",
  "invokeai.frontend.web.static*",
  "invokeai.configs*",
  "invokeai.app*",
]

[tool.setuptools.package-data]
"invokeai.assets.fonts" = ["**/*.ttf"]
"invokeai.backend" = ["**.png"]
"invokeai.configs" = ["*.example", "**/*.yaml", "*.txt"]
"invokeai.frontend.web.dist" = ["**"]
"invokeai.frontend.web.static" = ["**"]

#=== Begin: PyTest and Coverage
[tool.pytest.ini_options]
addopts = "--cov-report term --cov-report html --cov-report xml --strict-markers -m \"not slow\""
markers = [
  "slow: Marks tests as slow. Disabled by default. To run all tests, use -m \"\". To run only slow tests, use -m \"slow\"."
]
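
# Marker usage sketch (the test name is hypothetical):
#   import pytest
#   @pytest.mark.slow
#   def test_full_generation(): ...
# `pytest` skips it by default (addopts has -m "not slow"); `pytest -m "slow"` runs it.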

[tool.coverage.run]
branch = true
source = ["invokeai"]
omit = ["*tests*", "*migrations*", ".venv/*", "*.env"]

[tool.coverage.report]
show_missing = true
fail_under = 85 # let's set something sensible on Day 1 ...

[tool.coverage.json]
output = "coverage/coverage.json"
pretty_print = true

[tool.coverage.html]
directory = "coverage/html"

[tool.coverage.xml]
output = "coverage/index.xml"

#=== End: PyTest and Coverage

[tool.flake8]
max-line-length = 120
ignore = ["E203", "E266", "E501", "W503"]
select = ["B", "C", "E", "F", "W", "T4"]
exclude = [
  ".git",
  "__pycache__",
  "build",
  "dist",
  "invokeai/frontend/web/node_modules/",
]
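
# Note: stock flake8 does not read pyproject.toml; this table takes effect via
# the Flake8-pyproject plugin declared in the "test" extras above.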

[tool.black]
line-length = 120

[tool.isort]
profile = "black"
line_length = 120
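# Formatting sketch: profile = "black" keeps isort's import style compatible with
# black; run both locally with e.g. `isort . && black .` (or via pre-commit,
# which is included in the "test" extras).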