[build-system]
requires = ["setuptools~=65.5", "pip~=22.3", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "InvokeAI"
description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process"
requires-python = ">=3.10, <3.12"
readme = { content-type = "text/markdown", file = "README.md" }
keywords = ["stable-diffusion", "AI"]
dynamic = ["version"]
license = { file = "LICENSE" }
authors = [{ name = "The InvokeAI Project", email = "lincoln.stein@gmail.com" }]
classifiers = [
  'Development Status :: 4 - Beta',
  'Environment :: GPU',
  'Environment :: GPU :: NVIDIA CUDA',
  'Environment :: MacOS X',
  'Intended Audience :: End Users/Desktop',
  'Intended Audience :: Developers',
  'License :: OSI Approved :: MIT License',
  'Operating System :: POSIX :: Linux',
  'Operating System :: MacOS',
  'Operating System :: Microsoft :: Windows',
  'Programming Language :: Python :: 3 :: Only',
  'Programming Language :: Python :: 3.10',
  'Topic :: Artistic Software',
  'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
  'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
  'Topic :: Multimedia :: Graphics',
  'Topic :: Scientific/Engineering :: Artificial Intelligence',
  'Topic :: Scientific/Engineering :: Image Processing',
]
dependencies = [
  "accelerate~=0.23.0",
  "albumentations",
  "click",
  "clip_anytorch",       # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip"
  "compel~=2.0.2",
  "controlnet-aux>=0.0.6",
  "timm==0.6.13",        # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26
  "datasets",
  # When bumping diffusers beyond 0.21, make sure to address this:
  # https://github.com/invoke-ai/InvokeAI/blob/fc09ab7e13cb7ca5389100d149b6422ace7b8ed3/invokeai/app/invocations/latent.py#L513
  "diffusers[torch]~=0.21.0",
  "dnspython~=2.4.0",
  "dynamicprompts",
  "easing-functions",
  "einops",
  "facexlib",
  "fastapi~=0.103.2",
  "fastapi-events~=0.9.1",
  "huggingface-hub~=0.16.4",
  "invisible-watermark~=0.2.0", # needed to install SDXL base and refiner using their repo_ids
  "matplotlib",                 # needed for plotting of Penner easing functions
  "mediapipe",                  # needed for "mediapipeface" controlnet model
  "numpy",
  "npyscreen",
  "omegaconf",
  "onnx",
  "onnxruntime",
  "opencv-python",
  "pydantic~=2.4.2",
  "pydantic-settings~=2.0.3",
  "picklescan",
  "pillow",
  "prompt-toolkit",
  "pympler~=1.0.1",
  "pypatchmatch",
  "pyperclip",
  "pyreadline3",
  "python-multipart",
  "python-socketio~=5.10.0",
  "pytorch-lightning",
  "realesrgan",
  "requests~=2.28.2",
  "rich~=13.3",
  "safetensors~=0.4.0",
  "scikit-image~=0.21.0",
  "semver~=3.0.1",
  "send2trash",
  "test-tube~=0.7.5",
  "torch~=2.0.1",
  "torchvision~=0.15.2",
  "torchmetrics~=0.11.0",
  "torchsde~=0.2.5",
  "transformers~=4.31.0",
  "uvicorn[standard]~=0.21.1",
  "windows-curses; sys_platform=='win32'",
]

[project.optional-dependencies]
"dist" = ["pip-tools", "pipdeptree", "twine"]
"docs" = [
  "mkdocs-material<9.0",
  "mkdocs-git-revision-date-localized-plugin",
  "mkdocs-redirects==1.2.0",
]
"dev" = ["jurigged", "pudb"]
"test" = [
  "black",
  "flake8",
  "Flake8-pyproject",
  "isort",
  "mypy",
  "pre-commit",
  "pytest>6.0.0",
  "pytest-cov",
  "pytest-datadir",
]
"xformers" = [
  "xformers~=0.0.19; sys_platform!='darwin'",
  "triton; sys_platform=='linux'",
]
"onnx" = ["onnxruntime"]
"onnx-cuda" = ["onnxruntime-gpu"]
"onnx-directml" = ["onnxruntime-directml"]

[project.scripts]

# legacy entrypoints; provided for backwards compatibility
"configure_invokeai.py" = "invokeai.frontend.install.invokeai_configure:invokeai_configure"
"textual_inversion.py" = "invokeai.frontend.training:invokeai_textual_inversion"

# shortcut commands to start web ui
# "invokeai --web" will launch the web interface
# "invokeai" = "invokeai.frontend.legacy_launch_invokeai:main"

# new shortcut to launch web interface
"invokeai-web" = "invokeai.app.api_app:invoke_api"

# full commands
"invokeai-configure" = "invokeai.frontend.install.invokeai_configure:invokeai_configure"
"invokeai-merge" = "invokeai.frontend.merge:invokeai_merge_diffusers"
"invokeai-ti" = "invokeai.frontend.training:invokeai_textual_inversion"
"invokeai-model-install" = "invokeai.frontend.install.model_install:main"
"invokeai-migrate3" = "invokeai.backend.install.migrate_to_3:main"
"invokeai-update" = "invokeai.frontend.install.invokeai_update:main"
"invokeai-metadata" = "invokeai.backend.image_util.invoke_metadata:main"
"invokeai-node-web" = "invokeai.app.api_app:invoke_api"
"invokeai-import-images" = "invokeai.frontend.install.import_images:main"
"invokeai-db-maintenance" = "invokeai.backend.util.db_maintenance:main"

[project.urls]
"Homepage" = "https://invoke-ai.github.io/InvokeAI/"
"Documentation" = "https://invoke-ai.github.io/InvokeAI/"
"Source" = "https://github.com/invoke-ai/InvokeAI/"
"Bug Reports" = "https://github.com/invoke-ai/InvokeAI/issues"
"Discord" = "https://discord.gg/ZmtBAhwWhy"

[tool.setuptools.dynamic]
version = { attr = "invokeai.version.__version__" }

[tool.setuptools.packages.find]
"where" = ["."]
"include" = [
  "invokeai.assets.fonts*",
  "invokeai.version*",
  "invokeai.generator*",
  "invokeai.backend*",
  "invokeai.frontend*",
  "invokeai.frontend.web.dist*",
  "invokeai.frontend.web.static*",
  "invokeai.configs*",
  "invokeai.app*",
]

[tool.setuptools.package-data]
"invokeai.app.assets" = ["**/*.png"]
"invokeai.assets.fonts" = ["**/*.ttf"]
"invokeai.backend" = ["**.png"]
"invokeai.configs" = ["*.example", "**/*.yaml", "*.txt"]
"invokeai.frontend.web.dist" = ["**"]
"invokeai.frontend.web.static" = ["**"]
"invokeai.app.invocations" = ["**"]

#=== Begin: PyTest and Coverage
[tool.pytest.ini_options]
addopts = "--cov-report term --cov-report html --cov-report xml --strict-markers -m \"not slow\""
markers = [
  "slow: Marks tests as slow. Disabled by default. To run all tests, use -m \"\". To run only slow tests, use -m \"slow\".",
]
[tool.coverage.run]
branch = true
source = ["invokeai"]
omit = ["*tests*", "*migrations*", ".venv/*", "*.env"]
[tool.coverage.report]
show_missing = true
fail_under = 85 # let's set something sensible on Day 1 ...
[tool.coverage.json]
output = "coverage/coverage.json"
pretty_print = true
[tool.coverage.html]
directory = "coverage/html"
[tool.coverage.xml]
output = "coverage/index.xml"
#=== End: PyTest and Coverage

[tool.flake8]
max-line-length = 120
ignore = ["E203", "E266", "E501", "W503"]
select = ["B", "C", "E", "F", "W", "T4"]
exclude = [
  ".git",
  "__pycache__",
  "build",
  "dist",
  "invokeai/frontend/web/node_modules/",
  ".venv*",
]

[tool.black]
line-length = 120

[tool.isort]
profile = "black"
line_length = 120

[tool.mypy]
ignore_missing_imports = true # ignores missing types in third-party libraries

[[tool.mypy.overrides]]
follow_imports = "skip"
module = [
  "invokeai.app.api.routers.models",
  "invokeai.app.invocations.compel",
  "invokeai.app.invocations.latent",
  "invokeai.app.services.config.config_base",
  "invokeai.app.services.config.config_default",
  "invokeai.app.services.invocation_stats.invocation_stats_default",
  "invokeai.app.services.model_manager.model_manager_base",
  "invokeai.app.services.model_manager.model_manager_default",
  "invokeai.app.util.controlnet_utils",
  "invokeai.backend.image_util.txt2mask",
  "invokeai.backend.image_util.safety_checker",
  "invokeai.backend.image_util.patchmatch",
  "invokeai.backend.image_util.invisible_watermark",
  "invokeai.backend.install.model_install_backend",
  "invokeai.backend.ip_adapter.ip_adapter",
  "invokeai.backend.ip_adapter.resampler",
  "invokeai.backend.ip_adapter.unet_patcher",
  "invokeai.backend.model_management.convert_ckpt_to_diffusers",
  "invokeai.backend.model_management.lora",
  "invokeai.backend.model_management.model_cache",
  "invokeai.backend.model_management.model_manager",
  "invokeai.backend.model_management.model_merge",
  "invokeai.backend.model_management.model_probe",
  "invokeai.backend.model_management.model_search",
  "invokeai.backend.model_management.models.*", # this is needed to ignore the module's `__init__.py`
  "invokeai.backend.model_management.models.base",
  "invokeai.backend.model_management.models.controlnet",
  "invokeai.backend.model_management.models.ip_adapter",
  "invokeai.backend.model_management.models.lora",
  "invokeai.backend.model_management.models.sdxl",
  "invokeai.backend.model_management.models.stable_diffusion",
  "invokeai.backend.model_management.models.vae",
  "invokeai.backend.model_management.seamless",
  "invokeai.backend.model_management.util",
  "invokeai.backend.stable_diffusion.diffusers_pipeline",
  "invokeai.backend.stable_diffusion.diffusion.cross_attention_control",
  "invokeai.backend.stable_diffusion.diffusion.shared_invokeai_diffusion",
  "invokeai.backend.util.hotfixes",
  "invokeai.backend.util.logging",
  "invokeai.backend.util.mps_fixes",
  "invokeai.backend.util.util",
  "invokeai.frontend.install.model_install",
]
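
# Illustrative usage (editorial addition, not part of the build configuration):
# the optional-dependency groups defined above can be installed with standard
# pip extras syntax, e.g. `pip install -e ".[dev,test]"` for a development
# checkout, or `pip install "InvokeAI[xformers]"` to pull in the
# xformers/triton extras on supported platforms.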