diff --git a/.editorconfig b/.editorconfig index d4b0972eda..0ded504342 100644 --- a/.editorconfig +++ b/.editorconfig @@ -1,5 +1,8 @@ +root = true + # All files [*] +max_line_length = 80 charset = utf-8 end_of_line = lf indent_size = 2 @@ -10,3 +13,18 @@ trim_trailing_whitespace = true # Python [*.py] indent_size = 4 +max_line_length = 120 + +# css +[*.css] +indent_size = 4 + +# flake8 +[.flake8] +indent_size = 4 + +# Markdown MkDocs +[docs/**/*.md] +max_line_length = 80 +indent_size = 4 +indent_style = unset diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000000..2159b9dcc6 --- /dev/null +++ b/.flake8 @@ -0,0 +1,37 @@ +[flake8] +max-line-length = 120 +extend-ignore = + # See https://github.com/PyCQA/pycodestyle/issues/373 + E203, + # use Bugbear's B950 instead + E501, + # from black repo https://github.com/psf/black/blob/main/.flake8 + E266, W503, B907 +extend-select = + # Bugbear line length + B950 +extend-exclude = + scripts/orig_scripts/* + ldm/models/* + ldm/modules/* + ldm/data/* + ldm/generate.py + ldm/util.py + ldm/simplet2i.py +per-file-ignores = + # B950 line too long + # W605 invalid escape sequence + # F841 assigned to but never used + # F401 imported but unused + tests/test_prompt_parser.py: B950, W605, F401 + tests/test_textual_inversion.py: F841, B950 + # B023 Function definition does not bind loop variable + scripts/legacy_api.py: F401, B950, B023, F841 + ldm/invoke/__init__.py: F401 + # B010 Do not call setattr with a constant attribute value + ldm/invoke/server_legacy.py: B010 +# ===================== +# flake-quote settings: +# ===================== +# Set this to match black style: +inline-quotes = double diff --git a/.github/workflows/mkdocs-material.yml b/.github/workflows/mkdocs-material.yml index 26a46c1328..bddccd4f1b 100644 --- a/.github/workflows/mkdocs-material.yml +++ b/.github/workflows/mkdocs-material.yml @@ -9,6 +9,10 @@ jobs: mkdocs-material: if: github.event.pull_request.draft == false runs-on: ubuntu-latest + env: + REPO_URL: '${{ github.server_url }}/${{ github.repository }}' + REPO_NAME: '${{ github.repository }}' + SITE_URL: 'https://${{ github.repository_owner }}.github.io/InvokeAI' steps: - name: checkout sources uses: actions/checkout@v3 @@ -19,11 +23,15 @@ jobs: uses: actions/setup-python@v4 with: python-version: '3.10' + cache: pip + cache-dependency-path: pyproject.toml - name: install requirements + env: + PIP_USE_PEP517: 1 run: | python -m \ - pip install -r docs/requirements-mkdocs.txt + pip install ".[docs]" - name: confirm buildability run: | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..e5d024eaee --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,41 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: + - repo: https://github.com/psf/black + rev: 23.1.0 + hooks: + - id: black + + - repo: https://github.com/pycqa/isort + rev: 5.12.0 + hooks: + - id: isort + + - repo: https://github.com/PyCQA/flake8 + rev: 6.0.0 + hooks: + - id: flake8 + additional_dependencies: + - flake8-black + - flake8-bugbear + - flake8-comprehensions + - flake8-simplify + + - repo: https://github.com/pre-commit/mirrors-prettier + rev: 'v3.0.0-alpha.4' + hooks: + - id: prettier + + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: check-added-large-files + - id: check-executables-have-shebangs + - id: check-shebang-scripts-are-executable + - id: check-merge-conflict + - id: check-symlinks + - id: 
check-toml
+      - id: end-of-file-fixer
+      - id: no-commit-to-branch
+        args: ['--branch', 'main']
+      - id: trailing-whitespace
diff --git a/.prettierignore b/.prettierignore
new file mode 100644
index 0000000000..2ef13d5aae
--- /dev/null
+++ b/.prettierignore
@@ -0,0 +1,14 @@
+invokeai/frontend/.husky
+invokeai/frontend/patches
+
+# Ignore artifacts:
+build
+coverage
+static
+invokeai/frontend/dist
+
+# Ignore all HTML files:
+*.html
+
+# Ignore deprecated docs
+docs/installation/deprecated_documentation
diff --git a/.prettierrc.yaml b/.prettierrc.yaml
index ce4b99a07b..457c8267f6 100644
--- a/.prettierrc.yaml
+++ b/.prettierrc.yaml
@@ -1,9 +1,9 @@
-endOfLine: lf
-tabWidth: 2
-useTabs: false
-singleQuote: true
-quoteProps: as-needed
 embeddedLanguageFormatting: auto
+endOfLine: lf
+singleQuote: true
+semi: true
+trailingComma: es5
+useTabs: false
 overrides:
   - files: '*.md'
     options:
@@ -11,3 +11,9 @@ overrides:
       printWidth: 80
       parser: markdown
       cursorOffset: -1
+  - files: docs/**/*.md
+    options:
+      tabWidth: 4
+  - files: 'invokeai/frontend/public/locales/*.json'
+    options:
+      tabWidth: 4
diff --git a/docs/.markdownlint.jsonc b/docs/.markdownlint.jsonc
new file mode 100644
index 0000000000..c6b91b533f
--- /dev/null
+++ b/docs/.markdownlint.jsonc
@@ -0,0 +1,5 @@
+{
+  "MD046": false,
+  "MD007": false,
+  "MD030": false
+}
diff --git a/docs/features/index.md b/docs/features/index.md
index d9b0e1fd7c..50fc8bc77c 100644
--- a/docs/features/index.md
+++ b/docs/features/index.md
@@ -2,62 +2,82 @@
 title: Overview
 ---
 
-Here you can find the documentation for InvokeAI's various features.
+- The Basics
 
-## The Basics
-### * The [Web User Interface](WEB.md)
-Guide to the Web interface. Also see the [WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)
+  - The [Web User Interface](WEB.md)
 
-### * The [Unified Canvas](UNIFIED_CANVAS.md)
-Build complex scenes by combine and modifying multiple images in a stepwise
-fashion. This feature combines img2img, inpainting and outpainting in
-a single convenient digital artist-optimized user interface.
+    Guide to the Web interface. Also see the
+    [WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)
 
-### * The [Command Line Interface (CLI)](CLI.md)
-Scriptable access to InvokeAI's features.
+  - The [Unified Canvas](UNIFIED_CANVAS.md)
 
-## Image Generation
-### * [Prompt Engineering](PROMPTS.md)
-Get the images you want with the InvokeAI prompt engineering language.
+    Build complex scenes by combining and modifying multiple images in a
+    stepwise fashion. This feature combines img2img, inpainting and
+    outpainting in a single convenient digital artist-optimized user
+    interface.
 
-## * [Post-Processing](POSTPROCESS.md)
-Restore mangled faces and make images larger with upscaling. Also see the [Embiggen Upscaling Guide](EMBIGGEN.md).
+  - The [Command Line Interface (CLI)](CLI.md)
 
-## * The [Concepts Library](CONCEPTS.md)
-Add custom subjects and styles using HuggingFace's repository of embeddings.
+    Scriptable access to InvokeAI's features.
 
-### * [Image-to-Image Guide for the CLI](IMG2IMG.md)
-Use a seed image to build new creations in the CLI.
+- Image Generation
 
-### * [Inpainting Guide for the CLI](INPAINTING.md)
-Selectively erase and replace portions of an existing image in the CLI.
+  - [Prompt Engineering](PROMPTS.md)
 
-### * [Outpainting Guide for the CLI](OUTPAINTING.md)
-Extend the borders of the image with an "outcrop" function within the CLI.
+    Get the images you want with the InvokeAI prompt engineering language.
-### * [Generating Variations](VARIATIONS.md)
-Have an image you like and want to generate many more like it? Variations
-are the ticket.
+  - [Post-Processing](POSTPROCESS.md)
 
-## Model Management
+    Restore mangled faces and make images larger with upscaling. Also see
+    the [Embiggen Upscaling Guide](EMBIGGEN.md).
 
-## * [Model Installation](../installation/050_INSTALLING_MODELS.md)
-Learn how to import third-party models and switch among them. This
-guide also covers optimizing models to load quickly.
+  - The [Concepts Library](CONCEPTS.md)
 
-## * [Merging Models](MODEL_MERGING.md)
-Teach an old model new tricks. Merge 2-3 models together to create a
-new model that combines characteristics of the originals.
+    Add custom subjects and styles using HuggingFace's repository of
+    embeddings.
 
-## * [Textual Inversion](TEXTUAL_INVERSION.md)
-Personalize models by adding your own style or subjects.
+  - [Image-to-Image Guide for the CLI](IMG2IMG.md)
 
-# Other Features
+    Use a seed image to build new creations in the CLI.
 
-## * [The NSFW Checker](NSFW.md)
-Prevent InvokeAI from displaying unwanted racy images.
+  - [Inpainting Guide for the CLI](INPAINTING.md)
 
-## * [Miscellaneous](OTHER.md)
-Run InvokeAI on Google Colab, generate images with repeating patterns,
-batch process a file of prompts, increase the "creativity" of image
-generation by adding initial noise, and more!
+    Selectively erase and replace portions of an existing image in the CLI.
+
+  - [Outpainting Guide for the CLI](OUTPAINTING.md)
+
+    Extend the borders of the image with an "outcrop" function within the
+    CLI.
+
+  - [Generating Variations](VARIATIONS.md)
+
+    Have an image you like and want to generate many more like it?
+    Variations are the ticket.
+
+- Model Management
+
+  - [Model Installation](../installation/050_INSTALLING_MODELS.md)
+
+    Learn how to import third-party models and switch among them. This guide
+    also covers optimizing models to load quickly.
+
+  - [Merging Models](MODEL_MERGING.md)
+
+    Teach an old model new tricks. Merge 2-3 models together to create a new
+    model that combines characteristics of the originals.
+
+  - [Textual Inversion](TEXTUAL_INVERSION.md)
+
+    Personalize models by adding your own style or subjects.
+
+- Other Features
+
+  - [The NSFW Checker](NSFW.md)
+
+    Prevent InvokeAI from displaying unwanted racy images.
+
+  - [Miscellaneous](OTHER.md)
+
+    Run InvokeAI on Google Colab, generate images with repeating patterns,
+    batch process a file of prompts, increase the "creativity" of image
+    generation by adding initial noise, and more!
diff --git a/docs/help/IDE-Settings/index.md b/docs/help/IDE-Settings/index.md
new file mode 100644
index 0000000000..bdb045f800
--- /dev/null
+++ b/docs/help/IDE-Settings/index.md
@@ -0,0 +1,4 @@
+# :octicons-file-code-16: IDE-Settings
+
+Here we will share settings for the IDEs used by our developers; maybe you will
+find something interesting that helps boost your development efficiency 🔥
diff --git a/docs/help/IDE-Settings/vs-code.md b/docs/help/IDE-Settings/vs-code.md
new file mode 100644
index 0000000000..25be691d0e
--- /dev/null
+++ b/docs/help/IDE-Settings/vs-code.md
@@ -0,0 +1,250 @@
+---
+title: Visual Studio Code
+---
+
+# :material-microsoft-visual-studio-code: Visual Studio Code
+
+Workspace settings are stored in the project (repository) root and take
+priority over your user settings.
+
+This makes it possible to have different settings per project, while your user
+settings are used as defaults when no workspace settings are provided.
+
+## tasks.json
+
+First we will create a task configuration which creates a virtual environment
+and updates the deps (pip, setuptools and wheel).
+
+Into this venv we will then install the project from pyproject.toml in editable
+mode, together with the dev, docs and test dependencies.
+
+```json title=".vscode/tasks.json"
+{
+  // See https://go.microsoft.com/fwlink/?LinkId=733558
+  // for the documentation about the tasks.json format
+  "version": "2.0.0",
+  "tasks": [
+    {
+      "label": "Create virtual environment",
+      "detail": "Create .venv and upgrade pip, setuptools and wheel",
+      "command": "python3",
+      "args": [
+        "-m",
+        "venv",
+        ".venv",
+        "--prompt",
+        "InvokeAI",
+        "--upgrade-deps"
+      ],
+      "runOptions": {
+        "instanceLimit": 1,
+        "reevaluateOnRerun": true
+      },
+      "group": {
+        "kind": "build"
+      },
+      "presentation": {
+        "echo": true,
+        "reveal": "always",
+        "focus": false,
+        "panel": "shared",
+        "showReuseMessage": true,
+        "clear": false
+      }
+    },
+    {
+      "label": "build InvokeAI",
+      "detail": "Build pyproject.toml with extras dev, docs and test",
+      "command": "${workspaceFolder}/.venv/bin/python3",
+      "args": [
+        "-m",
+        "pip",
+        "install",
+        "--use-pep517",
+        "--editable",
+        ".[dev,docs,test]"
+      ],
+      "dependsOn": "Create virtual environment",
+      "dependsOrder": "sequence",
+      "group": {
+        "kind": "build",
+        "isDefault": true
+      },
+      "presentation": {
+        "echo": true,
+        "reveal": "always",
+        "focus": false,
+        "panel": "shared",
+        "showReuseMessage": true,
+        "clear": false
+      }
+    }
+  ]
+}
+```
+
+The fastest way to build InvokeAI is now ++cmd+shift+b++.
+
+## launch.json
+
+This file is used to define debugger configurations, so that you can launch and
+monitor the application with one click, set breakpoints to inspect specific
+states, and so on.
+
+```json title=".vscode/launch.json"
+{
+  "version": "0.2.0",
+  "configurations": [
+    {
+      "name": "invokeai web",
+      "type": "python",
+      "request": "launch",
+      "program": ".venv/bin/invokeai",
+      "justMyCode": true
+    },
+    {
+      "name": "invokeai cli",
+      "type": "python",
+      "request": "launch",
+      "program": ".venv/bin/invokeai",
+      "justMyCode": true
+    },
+    {
+      "name": "mkdocs serve",
+      "type": "python",
+      "request": "launch",
+      "program": ".venv/bin/mkdocs",
+      "args": ["serve"],
+      "justMyCode": true
+    }
+  ]
+}
+```
+
+Then you only need to hit ++f5++ and the fun begins :nerd: (It is assumed that
+you have created a virtual environment via the [tasks](#tasksjson) from the
+previous step.)
+
+## extensions.json
+
+A list of recommended VS Code extensions to make your life easier:
+
+```json title=".vscode/extensions.json"
+{
+  "recommendations": [
+    "editorconfig.editorconfig",
+    "github.vscode-pull-request-github",
+    "ms-python.black-formatter",
+    "ms-python.flake8",
+    "ms-python.isort",
+    "ms-python.python",
+    "ms-python.vscode-pylance",
+    "redhat.vscode-yaml",
+    "tamasfe.even-better-toml",
+    "eamodio.gitlens",
+    "foxundermoon.shell-format",
+    "timonwong.shellcheck",
+    "esbenp.prettier-vscode",
+    "davidanson.vscode-markdownlint",
+    "yzhang.markdown-all-in-one",
+    "bierner.github-markdown-preview",
+    "ms-azuretools.vscode-docker",
+    "mads-hartmann.bash-ide-vscode"
+  ]
+}
+```
+
+## settings.json
+
+With the settings below, your files get formatted when you save them (only
+your modifications where supported), which will help you avoid trouble
+with the pre-commit hooks.
+If the hooks fail, they will prevent you from committing, but most hooks fix
+the affected files directly, so that you just need to stage and commit them:
+
+```json title=".vscode/settings.json"
+{
+  "[json]": {
+    "editor.defaultFormatter": "esbenp.prettier-vscode",
+    "editor.quickSuggestions": {
+      "comments": false,
+      "strings": true,
+      "other": true
+    },
+    "editor.suggest.insertMode": "replace",
+    "gitlens.codeLens.scopes": ["document"]
+  },
+  "[jsonc]": {
+    "editor.defaultFormatter": "esbenp.prettier-vscode",
+    "editor.formatOnSave": true,
+    "editor.formatOnSaveMode": "modificationsIfAvailable"
+  },
+  "[python]": {
+    "editor.defaultFormatter": "ms-python.black-formatter",
+    "editor.formatOnSave": true,
+    "editor.formatOnSaveMode": "file"
+  },
+  "[toml]": {
+    "editor.defaultFormatter": "tamasfe.even-better-toml",
+    "editor.formatOnSave": true,
+    "editor.formatOnSaveMode": "modificationsIfAvailable"
+  },
+  "[yaml]": {
+    "editor.defaultFormatter": "esbenp.prettier-vscode",
+    "editor.formatOnSave": true,
+    "editor.formatOnSaveMode": "modificationsIfAvailable"
+  },
+  "[markdown]": {
+    "editor.defaultFormatter": "esbenp.prettier-vscode",
+    "editor.rulers": [80],
+    "editor.unicodeHighlight.ambiguousCharacters": false,
+    "editor.unicodeHighlight.invisibleCharacters": false,
+    "diffEditor.ignoreTrimWhitespace": false,
+    "editor.wordWrap": "on",
+    "editor.quickSuggestions": {
+      "comments": "off",
+      "strings": "off",
+      "other": "off"
+    },
+    "editor.formatOnSave": true,
+    "editor.formatOnSaveMode": "modificationsIfAvailable"
+  },
+  "[shellscript]": {
+    "editor.defaultFormatter": "foxundermoon.shell-format"
+  },
+  "[ignore]": {
+    "editor.defaultFormatter": "foxundermoon.shell-format"
+  },
+  "editor.rulers": [88],
+  "evenBetterToml.formatter.alignEntries": false,
+  "evenBetterToml.formatter.allowedBlankLines": 1,
+  "evenBetterToml.formatter.arrayAutoExpand": true,
+  "evenBetterToml.formatter.arrayTrailingComma": true,
+  "evenBetterToml.formatter.arrayAutoCollapse": true,
+  "evenBetterToml.formatter.columnWidth": 88,
+  "evenBetterToml.formatter.compactArrays": true,
+  "evenBetterToml.formatter.compactInlineTables": true,
+  "evenBetterToml.formatter.indentEntries": false,
+  "evenBetterToml.formatter.inlineTableExpand": true,
+  "evenBetterToml.formatter.reorderArrays": true,
+  "evenBetterToml.formatter.reorderKeys": true,
+  "evenBetterToml.formatter.compactEntries": false,
+  "evenBetterToml.schema.enabled": true,
+  "python.analysis.typeCheckingMode": "basic",
+  "python.formatting.provider": "black",
+  "python.languageServer": "Pylance",
+  "python.linting.enabled": true,
+  "python.linting.flake8Enabled": true,
+  "python.testing.unittestEnabled": false,
+  "python.testing.pytestEnabled": true,
+  "python.testing.pytestArgs": [
+    "tests",
+    "--cov=ldm",
+    "--cov-branch",
+    "--cov-report=term:skip-covered"
+  ],
+  "yaml.schemas": {
+    "https://json.schemastore.org/prettierrc.json": "${workspaceFolder}/.prettierrc.yaml"
+  }
+}
+```
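As a sketch of that round trip (the file name is hypothetical): a formatting
hook such as black rewrites the file and aborts the first commit, after which
you stage the auto-fixed file and commit again:

```sh
git commit -m "refactor model loading"   # black reformats foo.py; commit is aborted
git add foo.py                           # stage the file the hook fixed
git commit -m "refactor model loading"   # hooks pass this time
```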
diff --git a/docs/help/contributing/010_PULL_REQUEST.md b/docs/help/contributing/010_PULL_REQUEST.md
new file mode 100644
index 0000000000..b6ac8dcab2
--- /dev/null
+++ b/docs/help/contributing/010_PULL_REQUEST.md
@@ -0,0 +1,135 @@
+---
+title: Pull-Request
+---
+
+# :octicons-git-pull-request-16: Pull-Request
+
+## Prerequisites
+
+To follow the steps in this tutorial you will need:
+
+- A [GitHub](https://github.com) account
+- [git](https://git-scm.com/downloads) source control
+- A text / code editor (personally I prefer
+  [Visual Studio Code](https://code.visualstudio.com/Download))
+- A terminal:
+  - If you are on Linux/macOS you can use bash or zsh
+  - For Windows users the commands are written for PowerShell
+
+## Fork Repository
+
+The first step in contributing to InvokeAI is to fork the repository.
+
+Since you are already reading this doc, the easiest way to do so is by clicking
+[here](https://github.com/invoke-ai/InvokeAI/fork). You could also open
+[InvokeAI](https://github.com/invoke-ai/InvokeAI) and click on the "Fork" button
+in the top right.
+
+## Clone your fork
+
+After you have forked the repository, you should clone it to your dev machine:
+
+=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"
+
+    ``` sh
+    git clone https://github.com/<github username>/InvokeAI \
+        && cd InvokeAI
+    ```
+
+=== ":fontawesome-brands-windows:Windows"
+
+    ``` powershell
+    git clone https://github.com/<github username>/InvokeAI `
+        && cd InvokeAI
+    ```
+
+## Install in Editable Mode
+
+To install InvokeAI in editable mode we (as always) recommend creating and
+activating a venv first. Afterwards you can install the InvokeAI package,
+including the dev and docs extras, in editable mode, followed by the
+installation of the pre-commit hooks:
+
+=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"
+
+    ``` sh
+    python -m venv .venv \
+        --prompt InvokeAI \
+        --upgrade-deps \
+    && source .venv/bin/activate \
+    && pip install \
+        --upgrade \
+        --use-pep517 \
+        --editable=".[dev,docs]" \
+    && pre-commit install
+    ```
+
+=== ":fontawesome-brands-windows:Windows"
+
+    ``` powershell
+    python -m venv .venv `
+        --prompt InvokeAI `
+        --upgrade-deps `
+    && .venv/scripts/activate.ps1 `
+    && pip install `
+        --upgrade `
+        --use-pep517 `
+        --editable=".[dev,docs]" `
+    && pre-commit install
+    ```
+
+## Create a branch
+
+Make sure you are on the main branch, then create your feature branch from
+there:
+
+=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"
+
+    ``` sh
+    git checkout main \
+        && git pull \
+        && git checkout -B <branch name>
+    ```
+
+=== ":fontawesome-brands-windows:Windows"
+
+    ``` powershell
+    git checkout main `
+        && git pull `
+        && git checkout -B <branch name>
+    ```
+
+## Commit your changes
+
+When you are done adding / updating content, you need to commit those changes
+to your repository before you can actually open a PR:
+
+```{ .sh .annotate }
+git add <file names> # (1)!
+git commit -m "A commit message which describes your change"
+git push
+```
+
+1. Replace this with a space separated list of the files you changed, like:
+   `README.md foo.sh bar.json baz`
+
+## Create a Pull Request
+
+After pushing your changes, you are ready to create a Pull Request. Just head
+over to your fork on [GitHub](https://github.com), which should already show you
+a message that there have been recent changes on your feature branch and a green
+button which you can use to create the PR.
+
+The default target for your PRs is the main branch of
+[invoke-ai/InvokeAI](https://github.com/invoke-ai/InvokeAI).
+
+Another way would be to create it in VS-Code or via the GitHub CLI (or even via
+the GitHub CLI in a VS-Code Terminal Window 🤭):
+
+```sh
+gh pr create
+```
+
+The CLI will inform you if there are still unpushed commits on your branch. It
+will also prompt you for things like the Title and the Body (Description) if
+you did not already pass them as arguments.
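If you prefer to pass them up front, `gh` accepts them as flags; the title and
body here are just examples:

```sh
gh pr create \
    --base main \
    --title "docs: fix typos in the contributing guide" \
    --body "Corrects several typos and broken links in docs/help/contributing."
```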
diff --git a/docs/help/contributing/020_ISSUES.md b/docs/help/contributing/020_ISSUES.md
new file mode 100644
index 0000000000..576af82188
--- /dev/null
+++ b/docs/help/contributing/020_ISSUES.md
@@ -0,0 +1,26 @@
+---
+title: Issues
+---
+
+# :octicons-issue-opened-16: Issues
+
+## :fontawesome-solid-bug: Report a bug
+
+If you stumble over a bug while using InvokeAI, we would appreciate it a lot if
+you
+[open an issue](https://github.com/invoke-ai/InvokeAI/issues/new?assignees=&labels=bug&template=BUG_REPORT.yml&title=%5Bbug%5D%3A+)
+to inform us about the details so that our developers can look into it.
+
+If you also know how to fix the bug, take a look [here](010_PULL_REQUEST.md) to
+find out how to create a Pull Request.
+
+## Request a feature
+
+If you have an idea for a new feature which you would like to see in InvokeAI,
+there is a
+[feature request](https://github.com/invoke-ai/InvokeAI/issues/new?assignees=&labels=enhancement&template=FEATURE_REQUEST.yml&title=%5Benhancement%5D%3A+)
+template available in the issues section of the repository.
+
+If you are just curious which features have already been requested, you can
+find the overview of open requests
+[here](https://github.com/invoke-ai/InvokeAI/labels/enhancement).
diff --git a/docs/help/contributing/030_DOCS.md b/docs/help/contributing/030_DOCS.md
new file mode 100644
index 0000000000..f4ebfb9df4
--- /dev/null
+++ b/docs/help/contributing/030_DOCS.md
@@ -0,0 +1,32 @@
+---
+title: docs
+---
+
+# :simple-readthedocs: MkDocs-Material
+
+If you want to contribute to the docs, there is an easy way to verify the
+results of your changes before committing them.
+
+Just follow the steps in the [Pull-Request](010_PULL_REQUEST.md) docs, where we
+already
+[create a venv and install the docs extras](010_PULL_REQUEST.md#install-in-editable-mode).
+Once installed, it's as simple as:
+
+```sh
+mkdocs serve
+```
+
+This will build the docs locally and serve them on localhost; auto-refresh is
+included, so you can just update a doc, save it and switch to the browser,
+without needing to restart `mkdocs serve`.
+
+More information about the MkDocs-flavored Markdown syntax can be found
+[here](https://squidfunk.github.io/mkdocs-material/reference/).
+
+## :material-microsoft-visual-studio-code: VS-Code
+
+We also provide a
+[launch configuration for VS-Code](../IDE-Settings/vs-code.md#launchjson) which
+includes a `mkdocs serve` entry point as well. You also don't have to worry
+about the formatting, since this is automated via prettier, but this is of
+course not limited to VS-Code.
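By default the docs are served on the `dev_addr` configured in `mkdocs.yml`
(`127.0.0.1:8080` in this repository). If that port is already taken, you can
override the address on the command line, for example:

```sh
mkdocs serve --dev-addr 127.0.0.1:8000
```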
diff --git a/docs/help/contributing/090_NODE_TRANSFORMATION.md b/docs/help/contributing/090_NODE_TRANSFORMATION.md
new file mode 100644
index 0000000000..10fa89bd04
--- /dev/null
+++ b/docs/help/contributing/090_NODE_TRANSFORMATION.md
@@ -0,0 +1,76 @@
+# Transformation to nodes
+
+## Current state
+
+```mermaid
+flowchart TD
+    web[WebUI];
+    cli[CLI];
+    web --> |img2img| generate(generate);
+    web --> |txt2img| generate(generate);
+    cli --> |txt2img| generate(generate);
+    cli --> |img2img| generate(generate);
+    generate --> model_manager;
+    generate --> generators;
+    generate --> ti_manager[TI Manager];
+    generate --> etc;
+```
+
+## Transitional Architecture
+
+### first step
+
+```mermaid
+flowchart TD
+    web[WebUI];
+    cli[CLI];
+    web --> |img2img| img2img_node(Img2img node);
+    web --> |txt2img| generate(generate);
+    img2img_node --> model_manager;
+    img2img_node --> generators;
+    cli --> |txt2img| generate;
+    cli --> |img2img| generate;
+    generate --> model_manager;
+    generate --> generators;
+    generate --> ti_manager[TI Manager];
+    generate --> etc;
+```
+
+### second step
+
+```mermaid
+flowchart TD
+    web[WebUI];
+    cli[CLI];
+    web --> |img2img| img2img_node(img2img node);
+    img2img_node --> model_manager;
+    img2img_node --> generators;
+    web --> |txt2img| txt2img_node(txt2img node);
+    cli --> |txt2img| txt2img_node;
+    cli --> |img2img| generate(generate);
+    generate --> model_manager;
+    generate --> generators;
+    generate --> ti_manager[TI Manager];
+    generate --> etc;
+    txt2img_node --> model_manager;
+    txt2img_node --> generators;
+    txt2img_node --> ti_manager[TI Manager];
+```
+
+## Final Architecture
+
+```mermaid
+flowchart TD
+    web[WebUI];
+    cli[CLI];
+    web --> |img2img|img2img_node(img2img node);
+    cli --> |img2img|img2img_node;
+    web --> |txt2img|txt2img_node(txt2img node);
+    cli --> |txt2img|txt2img_node;
+    img2img_node --> model_manager;
+    txt2img_node --> model_manager;
+    img2img_node --> generators;
+    txt2img_node --> generators;
+    img2img_node --> ti_manager[TI Manager];
+    txt2img_node --> ti_manager[TI Manager];
+```
diff --git a/docs/help/contributing/index.md b/docs/help/contributing/index.md
new file mode 100644
index 0000000000..9e33003ef2
--- /dev/null
+++ b/docs/help/contributing/index.md
@@ -0,0 +1,16 @@
+---
+title: Contributing
+---
+
+# :fontawesome-solid-code-commit: Contributing
+
+There are different ways you can contribute to
+[InvokeAI](https://github.com/invoke-ai/InvokeAI), such as translations, or
+opening issues for bugs and ideas for improvements.
+
+This section of the docs explains some of those ways of contributing, to make
+it easier for newcomers as well as advanced users :nerd:
+
+If you want to contribute code, but you do not have an exact idea yet, take a
+look at the currently open
+[:fontawesome-solid-bug: Bug Reports](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen+label%3Abug)
diff --git a/docs/help/index.md b/docs/help/index.md
new file mode 100644
index 0000000000..fa56264486
--- /dev/null
+++ b/docs/help/index.md
@@ -0,0 +1,12 @@
+# :material-help: Help
+
+If you are looking for help with the installation of InvokeAI, please take a
+look at the [Installation](../installation/index.md) section of the docs.
+
+Here you will find help on topics like
+
+- how to contribute
+- configuration recommendations for IDEs
+
+If you have an idea about what's missing and aren't afraid to contribute, just
+take a look at [DOCS](./contributing/030_DOCS.md) to find out how to do so.
diff --git a/docs/index.md b/docs/index.md
index 4587b08f18..ab89434c55 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -2,6 +2,8 @@
 title: Home
 ---
 
+# :octicons-home-16: Home
+
 ### The InvokeAI Command Line Interface
-- [Command Line Interace Reference Guide](features/CLI.md)
+
+- [Command Line Interface Reference Guide](features/CLI.md)
+
 ### Image Management
-- [Image2Image](features/IMG2IMG.md)
-- [Inpainting](features/INPAINTING.md)
-- [Outpainting](features/OUTPAINTING.md)
-- [Adding custom styles and subjects](features/CONCEPTS.md)
-- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
-- [Embiggen upscaling](features/EMBIGGEN.md)
-- [Other Features](features/OTHER.md)
+
+- [Image2Image](features/IMG2IMG.md)
+- [Inpainting](features/INPAINTING.md)
+- [Outpainting](features/OUTPAINTING.md)
+- [Adding custom styles and subjects](features/CONCEPTS.md)
+- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
+- [Embiggen upscaling](features/EMBIGGEN.md)
+- [Other Features](features/OTHER.md)
+
 ### Model Management
-- [Installing](installation/050_INSTALLING_MODELS.md)
-- [Model Merging](features/MODEL_MERGING.md)
-- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
-- [Textual Inversion](features/TEXTUAL_INVERSION.md)
-- [Not Safe for Work (NSFW) Checker](features/NSFW.md)
+
+- [Installing](installation/050_INSTALLING_MODELS.md)
+- [Model Merging](features/MODEL_MERGING.md)
+- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
+- [Textual Inversion](features/TEXTUAL_INVERSION.md)
+- [Not Safe for Work (NSFW) Checker](features/NSFW.md)
+
 ### Prompt Engineering
-- [Prompt Syntax](features/PROMPTS.md)
-- [Generating Variations](features/VARIATIONS.md)
+
+- [Prompt Syntax](features/PROMPTS.md)
+- [Generating Variations](features/VARIATIONS.md)
 
 ## :octicons-log-16: Latest Changes
 
@@ -162,84 +181,188 @@ This method is recommended for those familiar with running Docker containers
 #### Migration to Stable Diffusion `diffusers` models
 
-Previous versions of InvokeAI supported the original model file format introduced with Stable Diffusion 1.4. In the original format, known variously as "checkpoint", or "legacy" format, there is a single large weights file ending with `.ckpt` or `.safetensors`. Though this format has served the community well, it has a number of disadvantages, including file size, slow loading times, and a variety of non-standard variants that require special-case code to handle. In addition, because checkpoint files are actually a bundle of multiple machine learning sub-models, it is hard to swap different sub-models in and out, or to share common sub-models. A new format, introduced by the StabilityAI company in collaboration with HuggingFace, is called `diffusers` and consists of a directory of individual models. The most immediate benefit of `diffusers` is that they load from disk very quickly. A longer term benefit is that in the near future `diffusers` models will be able to share common sub-models, dramatically reducing disk space when you have multiple fine-tune models derived from the same base.
+Previous versions of InvokeAI supported the original model file format
+introduced with Stable Diffusion 1.4. In the original format, known variously as
+"checkpoint", or "legacy" format, there is a single large weights file ending
+with `.ckpt` or `.safetensors`.
Though this format has served the community +well, it has a number of disadvantages, including file size, slow loading times, +and a variety of non-standard variants that require special-case code to handle. +In addition, because checkpoint files are actually a bundle of multiple machine +learning sub-models, it is hard to swap different sub-models in and out, or to +share common sub-models. A new format, introduced by the StabilityAI company in +collaboration with HuggingFace, is called `diffusers` and consists of a +directory of individual models. The most immediate benefit of `diffusers` is +that they load from disk very quickly. A longer term benefit is that in the near +future `diffusers` models will be able to share common sub-models, dramatically +reducing disk space when you have multiple fine-tune models derived from the +same base. -When you perform a new install of version 2.3.0, you will be offered the option to install the `diffusers` versions of a number of popular SD models, including Stable Diffusion versions 1.5 and 2.1 (including the 768x768 pixel version of 2.1). These will act and work just like the checkpoint versions. Do not be concerned if you already have a lot of ".ckpt" or ".safetensors" models on disk! InvokeAI 2.3.0 can still load these and generate images from them without any extra intervention on your part. +When you perform a new install of version 2.3.0, you will be offered the option +to install the `diffusers` versions of a number of popular SD models, including +Stable Diffusion versions 1.5 and 2.1 (including the 768x768 pixel version of +2.1). These will act and work just like the checkpoint versions. Do not be +concerned if you already have a lot of ".ckpt" or ".safetensors" models on disk! +InvokeAI 2.3.0 can still load these and generate images from them without any +extra intervention on your part. -To take advantage of the optimized loading times of `diffusers` models, InvokeAI offers options to convert legacy checkpoint models into optimized `diffusers` models. If you use the `invokeai` command line interface, the relevant commands are: +To take advantage of the optimized loading times of `diffusers` models, InvokeAI +offers options to convert legacy checkpoint models into optimized `diffusers` +models. If you use the `invokeai` command line interface, the relevant commands +are: -* `!convert_model` -- Take the path to a local checkpoint file or a URL that is pointing to one, convert it into a `diffusers` model, and import it into InvokeAI's models registry file. -* `!optimize_model` -- If you already have a checkpoint model in your InvokeAI models file, this command will accept its short name and convert it into a like-named `diffusers` model, optionally deleting the original checkpoint file. -* `!import_model` -- Take the local path of either a checkpoint file or a `diffusers` model directory and import it into InvokeAI's registry file. You may also provide the ID of any diffusers model that has been published on the [HuggingFace models repository](https://huggingface.co/models?pipeline_tag=text-to-image&sort=downloads) and it will be downloaded and installed automatically. +- `!convert_model` -- Take the path to a local checkpoint file or a URL that + is pointing to one, convert it into a `diffusers` model, and import it into + InvokeAI's models registry file. 
+- `!optimize_model` -- If you already have a checkpoint model in your InvokeAI
+  models file, this command will accept its short name and convert it into a
+  like-named `diffusers` model, optionally deleting the original checkpoint
+  file.
+- `!import_model` -- Take the local path of either a checkpoint file or a
+  `diffusers` model directory and import it into InvokeAI's registry file. You
+  may also provide the ID of any diffusers model that has been published on
+  the
+  [HuggingFace models repository](https://huggingface.co/models?pipeline_tag=text-to-image&sort=downloads)
+  and it will be downloaded and installed automatically.
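To make these concrete, an illustrative session at the CLI's `invoke>` prompt
might look like the following; the local path and HuggingFace model ID are
placeholders, not part of the release notes:

```
invoke> !convert_model /path/to/some-checkpoint.safetensors
invoke> !import_model stabilityai/stable-diffusion-2-1
```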
 The WebGUI offers similar functionality for model management.
 
-For advanced users, new command-line options provide additional functionality. Launching `invokeai` with the argument `--autoconvert ` takes the path to a directory of checkpoint files, automatically converts them into `diffusers` models and imports them. Each time the script is launched, the directory will be scanned for new checkpoint files to be loaded. Alternatively, the `--ckpt_convert` argument will cause any checkpoint or safetensors model that is already registered with InvokeAI to be converted into a `diffusers` model on the fly, allowing you to take advantage of future diffusers-only features without explicitly converting the model and saving it to disk.
+For advanced users, new command-line options provide additional functionality.
+Launching `invokeai` with the argument `--autoconvert <path/to/directory>` takes
+the path to a directory of checkpoint files, automatically converts them into
+`diffusers` models and imports them. Each time the script is launched, the
+directory will be scanned for new checkpoint files to be loaded. Alternatively,
+the `--ckpt_convert` argument will cause any checkpoint or safetensors model
+that is already registered with InvokeAI to be converted into a `diffusers`
+model on the fly, allowing you to take advantage of future diffusers-only
+features without explicitly converting the model and saving it to disk.
 
-Please see [INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/) for more information on model management in both the command-line and Web interfaces.
+Please see
+[INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/)
+for more information on model management in both the command-line and Web
+interfaces.
 
 #### Support for the `XFormers` Memory-Efficient Crossattention Package
 
-On CUDA (Nvidia) systems, version 2.3.0 supports the `XFormers` library. Once installed, the`xformers` package dramatically reduces the memory footprint of loaded Stable Diffusion models files and modestly increases image generation speed. `xformers` will be installed and activated automatically if you specify a CUDA system at install time.
+On CUDA (Nvidia) systems, version 2.3.0 supports the `XFormers` library. Once
+installed, the `xformers` package dramatically reduces the memory footprint of
+loaded Stable Diffusion model files and modestly increases image generation
+speed. `xformers` will be installed and activated automatically if you specify a
+CUDA system at install time.
 
-The caveat with using `xformers` is that it introduces slightly non-deterministic behavior, and images generated using the same seed and other settings will be subtly different between invocations. Generally the changes are unnoticeable unless you rapidly shift back and forth between images, but to disable `xformers` and restore fully deterministic behavior, you may launch InvokeAI using the `--no-xformers` option. This is most conveniently done by opening the file `invokeai/invokeai.init` with a text editor, and adding the line `--no-xformers` at the bottom.
+The caveat with using `xformers` is that it introduces slightly
+non-deterministic behavior, and images generated using the same seed and other
+settings will be subtly different between invocations. Generally the changes are
+unnoticeable unless you rapidly shift back and forth between images, but to
+disable `xformers` and restore fully deterministic behavior, you may launch
+InvokeAI using the `--no-xformers` option. This is most conveniently done by
+opening the file `invokeai/invokeai.init` with a text editor, and adding the
+line `--no-xformers` at the bottom.
 
 #### A Negative Prompt Box in the WebUI
 
-There is now a separate text input box for negative prompts in the WebUI. This is convenient for stashing frequently-used negative prompts ("mangled limbs, bad anatomy"). The `[negative prompt]` syntax continues to work in the main prompt box as well.
+There is now a separate text input box for negative prompts in the WebUI. This
+is convenient for stashing frequently-used negative prompts ("mangled limbs, bad
+anatomy"). The `[negative prompt]` syntax continues to work in the main prompt
+box as well.
 
-To see exactly how your prompts are being parsed, launch `invokeai` with the `--log_tokenization` option. The console window will then display the tokenization process for both positive and negative prompts.
+To see exactly how your prompts are being parsed, launch `invokeai` with the
+`--log_tokenization` option. The console window will then display the
+tokenization process for both positive and negative prompts.
 
 #### Model Merging
 
-Version 2.3.0 offers an intuitive user interface for merging up to three Stable Diffusion models using an intuitive user interface. Model merging allows you to mix the behavior of models to achieve very interesting effects. To use this, each of the models must already be imported into InvokeAI and saved in `diffusers` format, then launch the merger using a new menu item in the InvokeAI launcher script (`invoke.sh`, `invoke.bat`) or directly from the command line with `invokeai-merge --gui`. You will be prompted to select the models to merge, the proportions in which to mix them, and the mixing algorithm. The script will create a new merged `diffusers` model and import it into InvokeAI for your use.
+Version 2.3.0 offers an intuitive user interface for merging up to three Stable
+Diffusion models. Model merging allows you to
+mix the behavior of models to achieve very interesting effects. To use this,
+each of the models must already be imported into InvokeAI and saved in
+`diffusers` format, then launch the merger using a new menu item in the InvokeAI
+launcher script (`invoke.sh`, `invoke.bat`) or directly from the command line
+with `invokeai-merge --gui`. You will be prompted to select the models to merge,
+the proportions in which to mix them, and the mixing algorithm. The script will
+create a new merged `diffusers` model and import it into InvokeAI for your use.
 
-See [MODEL MERGING](https://invoke-ai.github.io/InvokeAI/features/MODEL_MERGING/) for more details.
+See
+[MODEL MERGING](https://invoke-ai.github.io/InvokeAI/features/MODEL_MERGING/)
+for more details.
 
 #### Textual Inversion Training
 
-Textual Inversion (TI) is a technique for training a Stable Diffusion model to emit a particular subject or style when triggered by a keyword phrase. You can perform TI training by placing a small number of images of the subject or style in a directory, and choosing a distinctive trigger phrase, such as "pointillist-style". After successful training, The subject or style will be activated by including `` in your prompt.
+Textual Inversion (TI) is a technique for training a Stable Diffusion model to
+emit a particular subject or style when triggered by a keyword phrase. You can
+perform TI training by placing a small number of images of the subject or style
+in a directory, and choosing a distinctive trigger phrase, such as
+"pointillist-style". After successful training, the subject or style will be
+activated by including `<trigger-phrase>` in your prompt.
 
-Previous versions of InvokeAI were able to perform TI, but it required using a command-line script with dozens of obscure command-line arguments. Version 2.3.0 features an intuitive TI frontend that will build a TI model on top of any `diffusers` model. To access training you can launch from a new item in the launcher script or from the command line using `invokeai-ti --gui`.
+Previous versions of InvokeAI were able to perform TI, but it required using a
+command-line script with dozens of obscure command-line arguments. Version 2.3.0
+features an intuitive TI frontend that will build a TI model on top of any
+`diffusers` model. To access training you can launch from a new item in the
+launcher script or from the command line using `invokeai-ti --gui`.
 
-See [TEXTUAL INVERSION](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/) for further details.
+See
+[TEXTUAL INVERSION](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/)
+for further details.
 
 #### A New Installer Experience
 
-The InvokeAI installer has been upgraded in order to provide a smoother and hopefully more glitch-free experience. In addition, InvokeAI is now packaged as a PyPi project, allowing developers and power-users to install InvokeAI with the command `pip install InvokeAI --use-pep517`. Please see [Installation](#installation) for details.
+The InvokeAI installer has been upgraded in order to provide a smoother and
+hopefully more glitch-free experience. In addition, InvokeAI is now packaged as
+a PyPi project, allowing developers and power-users to install InvokeAI with the
+command `pip install InvokeAI --use-pep517`. Please see
+[Installation](#installation) for details.
 
-Developers should be aware that the `pip` installation procedure has been simplified and that the `conda` method is no longer supported at all. Accordingly, the `environments_and_requirements` directory has been deleted from the repository.
+Developers should be aware that the `pip` installation procedure has been
+simplified and that the `conda` method is no longer supported at all.
+Accordingly, the `environments_and_requirements` directory has been deleted from
+the repository.
 
 #### Command-line name changes
 
-All of InvokeAI's functionality, including the WebUI, command-line interface, textual inversion training and model merging, can all be accessed from the `invoke.sh` and `invoke.bat` launcher scripts. The menu of options has been expanded to add the new functionality. For the convenience of developers and power users, we have normalized the names of the InvokeAI command-line scripts:
+All of InvokeAI's functionality, including the WebUI, command-line interface,
+textual inversion training and model merging, can be accessed from the
+`invoke.sh` and `invoke.bat` launcher scripts. The menu of options has been
+expanded to add the new functionality. For the convenience of developers and
+power users, we have normalized the names of the InvokeAI command-line scripts:
 
-* `invokeai` -- Command-line client
-* `invokeai --web` -- Web GUI
-* `invokeai-merge --gui` -- Model merging script with graphical front end
-* `invokeai-ti --gui` -- Textual inversion script with graphical front end
-* `invokeai-configure` -- Configuration tool for initializing the `invokeai` directory and selecting popular starter models.
+- `invokeai` -- Command-line client
+- `invokeai --web` -- Web GUI
+- `invokeai-merge --gui` -- Model merging script with graphical front end
+- `invokeai-ti --gui` -- Textual inversion script with graphical front end
+- `invokeai-configure` -- Configuration tool for initializing the `invokeai`
+  directory and selecting popular starter models.
 
-For backward compatibility, the old command names are also recognized, including `invoke.py` and `configure-invokeai.py`. However, these are deprecated and will eventually be removed.
+For backward compatibility, the old command names are also recognized, including
+`invoke.py` and `configure-invokeai.py`. However, these are deprecated and will
+eventually be removed.
 
-Developers should be aware that the locations of the script's source code has been moved. The new locations are:
-* `invokeai` => `ldm/invoke/CLI.py`
-* `invokeai-configure` => `ldm/invoke/config/configure_invokeai.py`
-* `invokeai-ti`=> `ldm/invoke/training/textual_inversion.py`
-* `invokeai-merge` => `ldm/invoke/merge_diffusers`
+Developers should be aware that the locations of the scripts' source code have
+been moved. The new locations are:
 
-Developers are strongly encouraged to perform an "editable" install of InvokeAI using `pip install -e . --use-pep517` in the Git repository, and then to call the scripts using their 2.3.0 names, rather than executing the scripts directly. Developers should also be aware that the several important data files have been relocated into a new directory named `invokeai`. This includes the WebGUI's `frontend` and `backend` directories, and the `INITIAL_MODELS.yaml` files used by the installer to select starter models. Eventually all InvokeAI modules will be in subdirectories of `invokeai`.
+- `invokeai` => `ldm/invoke/CLI.py`
+- `invokeai-configure` => `ldm/invoke/config/configure_invokeai.py`
+- `invokeai-ti` => `ldm/invoke/training/textual_inversion.py`
+- `invokeai-merge` => `ldm/invoke/merge_diffusers`
 
-Please see [2.3.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.3.0) for further details.
-For older changelogs, please visit the
+Developers are strongly encouraged to perform an "editable" install of InvokeAI
+using `pip install -e . --use-pep517` in the Git repository, and then to call
+the scripts using their 2.3.0 names, rather than executing the scripts directly.
+Developers should also be aware that several important data files have been
+relocated into a new directory named `invokeai`. This includes the WebGUI's
+`frontend` and `backend` directories, and the `INITIAL_MODELS.yaml` files used
+by the installer to select starter models.
Eventually all InvokeAI modules will +be in subdirectories of `invokeai`. + +Please see +[2.3.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.3.0) +for further details. For older changelogs, please visit the **[CHANGELOG](CHANGELOG/#v223-2-december-2022)**. ## :material-target: Troubleshooting -Please check out our **[:material-frequently-asked-questions: -Troubleshooting -Guide](installation/010_INSTALL_AUTOMATED.md#troubleshooting)** to -get solutions for common installation problems and other issues. +Please check out our +**[:material-frequently-asked-questions: Troubleshooting Guide](installation/010_INSTALL_AUTOMATED.md#troubleshooting)** +to get solutions for common installation problems and other issues. ## :octicons-repo-push-24: Contributing @@ -265,8 +388,8 @@ thank them for their time, hard work and effort. For support, please use this repository's GitHub Issues tracking service. Feel free to send me an email if you use and like the script. -Original portions of the software are Copyright (c) 2022-23 -by [The InvokeAI Team](https://github.com/invoke-ai). +Original portions of the software are Copyright (c) 2022-23 by +[The InvokeAI Team](https://github.com/invoke-ai). ## :octicons-book-24: Further Reading diff --git a/docs/requirements-mkdocs.txt b/docs/requirements-mkdocs.txt deleted file mode 100644 index a637622954..0000000000 --- a/docs/requirements-mkdocs.txt +++ /dev/null @@ -1,5 +0,0 @@ -mkdocs -mkdocs-material>=8, <9 -mkdocs-git-revision-date-localized-plugin -mkdocs-redirects==1.2.0 - diff --git a/mkdocs.yml b/mkdocs.yml index ebd9ec0acf..0e9bf5687a 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -2,14 +2,14 @@ # General site_name: InvokeAI Stable Diffusion Toolkit Docs -site_url: https://invoke-ai.github.io/InvokeAI +site_url: !ENV [SITE_URL, 'https://invoke-ai.github.io/InvokeAI'] site_author: mauwii dev_addr: '127.0.0.1:8080' # Repository -repo_name: 'invoke-ai/InvokeAI' -repo_url: 'https://github.com/invoke-ai/InvokeAI' -edit_uri: edit/main/docs/ +repo_name: !ENV [REPO_NAME, 'invoke-ai/InvokeAI'] +repo_url: !ENV [REPO_URL, 'https://github.com/invoke-ai/InvokeAI'] +edit_uri: blob/main/docs/ # Copyright copyright: Copyright © 2022 InvokeAI Team @@ -19,7 +19,8 @@ theme: name: material icon: repo: fontawesome/brands/github - edit: material/file-document-edit-outline + edit: material/pencil + view: material/eye palette: - media: '(prefers-color-scheme: light)' scheme: default @@ -33,6 +34,11 @@ theme: icon: material/lightbulb-outline name: Switch to light mode features: + - content.action.edit + - content.action.view + - content.code.copy + - content.tabs.link + - navigation.indexes - navigation.instant - navigation.tabs - navigation.top @@ -89,9 +95,9 @@ plugins: enable_creation_date: true - redirects: redirect_maps: - 'installation/INSTALL_AUTOMATED.md': 'installation/010_INSTALL_AUTOMATED.md' - 'installation/INSTALL_MANUAL.md': 'installation/020_INSTALL_MANUAL.md' - 'installation/INSTALL_SOURCE.md': 'installation/020_INSTALL_MANUAL.md' - 'installation/INSTALL_DOCKER.md': 'installation/040_INSTALL_DOCKER.md' - 'installation/INSTALLING_MODELS.md': 'installation/050_INSTALLING_MODELS.md' - 'installation/INSTALL_PATCHMATCH.md': 'installation/060_INSTALL_PATCHMATCH.md' + 'installation/INSTALL_AUTOMATED.md': 'installation/010_INSTALL_AUTOMATED.md' + 'installation/INSTALL_MANUAL.md': 'installation/020_INSTALL_MANUAL.md' + 'installation/INSTALL_SOURCE.md': 'installation/020_INSTALL_MANUAL.md' + 'installation/INSTALL_DOCKER.md': 
'installation/040_INSTALL_DOCKER.md' + 'installation/INSTALLING_MODELS.md': 'installation/050_INSTALLING_MODELS.md' + 'installation/INSTALL_PATCHMATCH.md': 'installation/060_INSTALL_PATCHMATCH.md' diff --git a/pyproject.toml b/pyproject.toml index 6357d25653..6b866f80ae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,43 +1,37 @@ [build-system] -requires = ["setuptools~=65.5", "pip~=22.3", "wheel"] build-backend = "setuptools.build_meta" +requires = ["setuptools ~= 67.1", "wheel"] [project] -name = "InvokeAI" -description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process" -requires-python = ">=3.9, <3.11" -readme = { content-type = "text/markdown", file = "README.md" } -keywords = ["stable-diffusion", "AI"] -dynamic = ["version"] -license = { file = "LICENSE" } -authors = [{ name = "The InvokeAI Project", email = "lincoln.stein@gmail.com" }] +authors = [{name = "The InvokeAI Project", email = "lincoln.stein@gmail.com"}] classifiers = [ - 'Development Status :: 4 - Beta', - 'Environment :: GPU', - 'Environment :: GPU :: NVIDIA CUDA', - 'Environment :: MacOS X', - 'Intended Audience :: End Users/Desktop', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: MIT License', - 'Operating System :: POSIX :: Linux', - 'Operating System :: MacOS', - 'Operating System :: Microsoft :: Windows', - 'Programming Language :: Python :: 3 :: Only', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10', - 'Topic :: Artistic Software', - 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application', - 'Topic :: Internet :: WWW/HTTP :: WSGI :: Server', - 'Topic :: Multimedia :: Graphics', - 'Topic :: Scientific/Engineering :: Artificial Intelligence', - 'Topic :: Scientific/Engineering :: Image Processing', + "Development Status :: 4 - Beta", + "Environment :: GPU :: NVIDIA CUDA", + "Environment :: GPU", + "Environment :: MacOS X", + "Intended Audience :: Developers", + "Intended Audience :: End Users/Desktop", + "License :: OSI Approved :: MIT License", + "Operating System :: MacOS", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX :: Linux", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python", + "Topic :: Artistic Software", + "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", + "Topic :: Internet :: WWW/HTTP :: WSGI :: Server", + "Topic :: Multimedia :: Graphics", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Scientific/Engineering :: Image Processing", ] dependencies = [ "accelerate", "albumentations", "click", - "clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", + "clip_anytorch", "compel==0.1.7", "datasets", "diffusers[torch]~=0.13", @@ -54,7 +48,7 @@ dependencies = [ "huggingface-hub>=0.11.1", "imageio", "imageio-ffmpeg", - "k-diffusion", # replacing "k-diffusion @ https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip", + "k-diffusion", "kornia", "npyscreen", "numpy<1.24", @@ -62,8 +56,8 @@ dependencies = [ "opencv-python", "picklescan", "pillow", - "pudb", "prompt-toolkit", + "pudb", "pypatchmatch", "pyreadline3", "pytorch-lightning==1.7.7", @@ -75,62 +69,116 @@ dependencies = [ "streamlit", "taming-transformers-rom1504", 
"test-tube>=0.7.5", - "torch>=1.13.1", "torch-fidelity", - "torchvision>=0.14.1", + "torch>=1.13.1", "torchmetrics", + "torchvision>=0.14.1", "transformers~=4.25", "windows-curses; sys_platform=='win32'", ] +description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process" +dynamic = ["version"] +keywords = ["AI", "stable-diffusion"] +license = {text = "MIT"} +name = "InvokeAI" +readme = {content-type = "text/markdown", file = "README.md"} +requires-python = ">=3.9, <3.11" [project.optional-dependencies] +"dev" = [ + "black[jupyter]", + "flake8", + "flake8-black", + "flake8-bugbear", + "isort", + "pre-commit", +] "dist" = ["pip-tools", "pipdeptree", "twine"] "docs" = [ - "mkdocs-material<9.0", "mkdocs-git-revision-date-localized-plugin", + "mkdocs-material==9.*", "mkdocs-redirects==1.2.0", ] -"test" = ["pytest>6.0.0", "pytest-cov"] +"test" = ["pytest-cov", "pytest>6.0.0"] "xformers" = [ - "xformers~=0.0.16; sys_platform!='darwin'", - "triton; sys_platform=='linux'", + "triton; sys_platform=='linux'", + "xformers~=0.0.16; sys_platform!='darwin'", ] [project.scripts] # legacy entrypoints; provided for backwards compatibility -"invoke.py" = "ldm.invoke.CLI:main" "configure_invokeai.py" = "ldm.invoke.config.invokeai_configure:main" -"textual_inversion.py" = "ldm.invoke.training.textual_inversion:main" +"invoke.py" = "ldm.invoke.CLI:main" "merge_embeddings.py" = "ldm.invoke.merge_diffusers:main" +"textual_inversion.py" = "ldm.invoke.training.textual_inversion:main" # modern entrypoints "invokeai" = "ldm.invoke.CLI:main" "invokeai-configure" = "ldm.invoke.config.invokeai_configure:main" -"invokeai-merge" = "ldm.invoke.merge_diffusers:main" # note name munging +"invokeai-merge" = "ldm.invoke.merge_diffusers:main" "invokeai-ti" = "ldm.invoke.training.textual_inversion:main" -"invokeai-model-install" = "ldm.invoke.config.model_install:main" -"invokeai-update" = "ldm.invoke.config.invokeai_update:main" [project.urls] -"Homepage" = "https://invoke-ai.github.io/InvokeAI/" -"Documentation" = "https://invoke-ai.github.io/InvokeAI/" -"Source" = "https://github.com/invoke-ai/InvokeAI/" "Bug Reports" = "https://github.com/invoke-ai/InvokeAI/issues" "Discord" = "https://discord.gg/ZmtBAhwWhy" +"Documentation" = "https://invoke-ai.github.io/InvokeAI/" +"Homepage" = "https://invoke-ai.github.io/InvokeAI/" +"Source" = "https://github.com/invoke-ai/InvokeAI/" + +[tool.setuptools] +license-files = ["LICENSE"] [tool.setuptools.dynamic] -version = { attr = "ldm.invoke.__version__" } +version = {attr = "ldm.invoke.__version__"} [tool.setuptools.packages.find] +"include" = [ + "invokeai.assets.web", + "invokeai.backend*", + "invokeai.configs*", + "invokeai.frontend.dist*", + "ldm*", +] "where" = ["."] -"include" = ["invokeai.assets.web*", "invokeai.backend*", "invokeai.frontend.dist*", "invokeai.configs*", "ldm*"] [tool.setuptools.package-data] "invokeai.assets.web" = ["**.png"] -"invokeai.backend" = ["**.png"] -"invokeai.configs" = ["*.example", "**/*.yaml", "*.txt"] +"invokeai.configs" = ["**.example", "**.txt", "**.yaml"] "invokeai.frontend.dist" = ["**"] +[tool.black] +extend-exclude = ''' +/( + # skip legacy scripts + | scripts/orig_scripts +)/ +''' +line-length = 88 +target-version = ['py39'] + +[tool.isort] +atomic = true +extend_skip_glob = ["scripts/orig_scripts/*"] +filter_files = true +line_length = 120 +profile = "black" +py_version = 39 +remove_redundant_aliases = true +skip_gitignore = true +src_paths = ["installer", "invokeai", 
"ldm", "tests"] +virtual_env = ".venv" + +[tool.coverage.run] +branch = true +parallel = true + +[tool.coverage.report] +skip_covered = true +skip_empty = true + +[tool.coverage.paths] +source = ["invokeai/backend", "ldm/invoke"] + [tool.pytest.ini_options] -addopts = "-p pytest_cov --junitxml=junit/test-results.xml --cov-report=term:skip-covered --cov=ldm/invoke --cov=backend --cov-branch" +addopts = ["--cov=invokeai/backend", "--cov=ldm/invoke"]