Merge branch 'v2.3' into enhance/simple-param-scanner-script

Lincoln Stein 2023-03-02 08:11:57 -05:00 committed by GitHub
commit 3c64fad379
20 changed files with 1073 additions and 201 deletions

.editorconfig

@@ -1,5 +1,8 @@
root = true
# All files
[*]
max_line_length = 80
charset = utf-8
end_of_line = lf
indent_size = 2
@@ -10,3 +13,18 @@ trim_trailing_whitespace = true
# Python
[*.py]
indent_size = 4
max_line_length = 120
# css
[*.css]
indent_size = 4
# flake8
[.flake8]
indent_size = 4
# Markdown MkDocs
[docs/**/*.md]
max_line_length = 80
indent_size = 4
indent_style = unset

.flake8 Normal file

@@ -0,0 +1,37 @@
[flake8]
max-line-length = 120
extend-ignore =
    # See https://github.com/PyCQA/pycodestyle/issues/373
    E203,
    # use Bugbear's B950 instead
    E501,
    # from black repo https://github.com/psf/black/blob/main/.flake8
    E266, W503, B907
extend-select =
    # Bugbear line length
    B950
extend-exclude =
    scripts/orig_scripts/*
    ldm/models/*
    ldm/modules/*
    ldm/data/*
    ldm/generate.py
    ldm/util.py
    ldm/simplet2i.py
per-file-ignores =
    # B950 line too long
    # W605 invalid escape sequence
    # F841 assigned to but never used
    # F401 imported but unused
    tests/test_prompt_parser.py: B950, W605, F401
    tests/test_textual_inversion.py: F841, B950
    # B023 Function definition does not bind loop variable
    scripts/legacy_api.py: F401, B950, B023, F841
    ldm/invoke/__init__.py: F401
    # B010 Do not call setattr with a constant attribute value
    ldm/invoke/server_legacy.py: B010
# =====================
# flake-quote settings:
# =====================
# Set this to match black style:
inline-quotes = double
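For reference, a quick way to run this configuration locally is sketched below; it assumes flake8 and the plugins referenced above are installed (the same ones listed in the pre-commit config later in this commit), since flake8 picks up `.flake8` from the working directory automatically:

```sh
# Sketch of a local lint run; plugin list mirrors .pre-commit-config.yaml
pip install flake8 flake8-black flake8-bugbear flake8-comprehensions flake8-simplify
flake8 .
```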

.github/workflows/mkdocs-material.yml

@@ -9,6 +9,10 @@ jobs:
  mkdocs-material:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    env:
      REPO_URL: '${{ github.server_url }}/${{ github.repository }}'
      REPO_NAME: '${{ github.repository }}'
      SITE_URL: 'https://${{ github.repository_owner }}.github.io/InvokeAI'
    steps:
      - name: checkout sources
        uses: actions/checkout@v3
@@ -19,11 +23,15 @@ jobs:
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
          cache: pip
          cache-dependency-path: pyproject.toml
      - name: install requirements
        env:
          PIP_USE_PEP517: 1
        run: |
          python -m \
            pip install ".[docs]"
      - name: confirm buildability
        run: |

.pre-commit-config.yaml Normal file

@@ -0,0 +1,41 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: https://github.com/psf/black
    rev: 23.1.0
    hooks:
      - id: black

  - repo: https://github.com/pycqa/isort
    rev: 5.12.0
    hooks:
      - id: isort

  - repo: https://github.com/PyCQA/flake8
    rev: 6.0.0
    hooks:
      - id: flake8
        additional_dependencies:
          - flake8-black
          - flake8-bugbear
          - flake8-comprehensions
          - flake8-simplify

  - repo: https://github.com/pre-commit/mirrors-prettier
    rev: 'v3.0.0-alpha.4'
    hooks:
      - id: prettier

  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: check-added-large-files
      - id: check-executables-have-shebangs
      - id: check-shebang-scripts-are-executable
      - id: check-merge-conflict
      - id: check-symlinks
      - id: check-toml
      - id: end-of-file-fixer
      - id: no-commit-to-branch
        args: ['--branch', 'main']
      - id: trailing-whitespace
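As a usage sketch (not part of this commit), the hooks above become active once pre-commit is installed into the clone; the contributing guide later in this commit does the same via `pre-commit install`:

```sh
# Install pre-commit, wire it into .git/hooks, then lint the whole tree once
pip install pre-commit
pre-commit install
pre-commit run --all-files
```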

.prettierignore Normal file

@@ -0,0 +1,14 @@
invokeai/frontend/.husky
invokeai/frontend/patches

# Ignore artifacts:
build
coverage
static
invokeai/frontend/dist

# Ignore all HTML files:
*.html

# Ignore deprecated docs
docs/installation/deprecated_documentation

.prettierrc.yaml

@@ -1,9 +1,9 @@
embeddedLanguageFormatting: auto
endOfLine: lf
semi: true
singleQuote: true
trailingComma: es5
useTabs: false
overrides:
  - files: '*.md'
    options:
@@ -11,3 +11,9 @@ overrides:
      printWidth: 80
      parser: markdown
      cursorOffset: -1
  - files: docs/**/*.md
    options:
      tabWidth: 4
  - files: 'invokeai/frontend/public/locales/*.json'
    options:
      tabWidth: 4

docs/.markdownlint.jsonc Normal file

@@ -0,0 +1,5 @@
{
  "MD046": false,
  "MD007": false,
  "MD030": false
}

docs/features/index.md

@@ -2,62 +2,82 @@
title: Overview
---

- The Basics

    - The [Web User Interface](WEB.md)

        Guide to the Web interface. Also see the
        [WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)

    - The [Unified Canvas](UNIFIED_CANVAS.md)

        Build complex scenes by combining and modifying multiple images in a
        stepwise fashion. This feature combines img2img, inpainting and
        outpainting in a single convenient digital artist-optimized user
        interface.

    - The [Command Line Interface (CLI)](CLI.md)

        Scriptable access to InvokeAI's features.

- Image Generation

    - [Prompt Engineering](PROMPTS.md)

        Get the images you want with the InvokeAI prompt engineering language.

    - [Post-Processing](POSTPROCESS.md)

        Restore mangled faces and make images larger with upscaling. Also see
        the [Embiggen Upscaling Guide](EMBIGGEN.md).

    - The [Concepts Library](CONCEPTS.md)

        Add custom subjects and styles using HuggingFace's repository of
        embeddings.

    - [Image-to-Image Guide for the CLI](IMG2IMG.md)

        Use a seed image to build new creations in the CLI.

    - [Inpainting Guide for the CLI](INPAINTING.md)

        Selectively erase and replace portions of an existing image in the CLI.

    - [Outpainting Guide for the CLI](OUTPAINTING.md)

        Extend the borders of the image with an "outcrop" function within the
        CLI.

    - [Generating Variations](VARIATIONS.md)

        Have an image you like and want to generate many more like it?
        Variations are the ticket.

- Model Management

    - [Model Installation](../installation/050_INSTALLING_MODELS.md)

        Learn how to import third-party models and switch among them. This
        guide also covers optimizing models to load quickly.

    - [Merging Models](MODEL_MERGING.md)

        Teach an old model new tricks. Merge 2-3 models together to create a
        new model that combines characteristics of the originals.

    - [Textual Inversion](TEXTUAL_INVERSION.md)

        Personalize models by adding your own style or subjects.

- Other Features

    - [The NSFW Checker](NSFW.md)

        Prevent InvokeAI from displaying unwanted racy images.

    - [Miscellaneous](OTHER.md)

        Run InvokeAI on Google Colab, generate images with repeating patterns,
        batch process a file of prompts, increase the "creativity" of image
        generation by adding initial noise, and more!

docs/help/IDE-Settings/index.md

@@ -0,0 +1,4 @@
# :octicons-file-code-16: IDE-Settings

Here we will share settings for the IDEs used by our developers; maybe you can
find something interesting which will help to boost your development
efficiency 🔥

docs/help/IDE-Settings/vs-code.md

@@ -0,0 +1,250 @@
---
title: Visual Studio Code
---
# :material-microsoft-visual-studio-code:Visual Studio Code
The workspace settings are stored in the project (repository) root and take
higher priority than your user settings.

This way you can have different settings for different projects, while your
user settings are used as defaults when no workspace settings are provided.
## tasks.json
First we will create a task configuration which creates a virtual environment
and upgrades its base dependencies (pip, setuptools and wheel).

Into this venv we will then install the project defined in pyproject.toml in
editable mode, together with the dev, docs and test extras.
```json title=".vscode/tasks.json"
{
// See https://go.microsoft.com/fwlink/?LinkId=733558
// for the documentation about the tasks.json format
"version": "2.0.0",
"tasks": [
{
"label": "Create virtual environment",
"detail": "Create .venv and upgrade pip, setuptools and wheel",
"command": "python3",
"args": [
"-m",
"venv",
".venv",
"--prompt",
"InvokeAI",
"--upgrade-deps"
],
"runOptions": {
"instanceLimit": 1,
"reevaluateOnRerun": true
},
"group": {
"kind": "build"
},
"presentation": {
"echo": true,
"reveal": "always",
"focus": false,
"panel": "shared",
"showReuseMessage": true,
"clear": false
}
},
{
"label": "build InvokeAI",
"detail": "Build pyproject.toml with extras dev, docs and test",
"command": "${workspaceFolder}/.venv/bin/python3",
"args": [
"-m",
"pip",
"install",
"--use-pep517",
"--editable",
".[dev,docs,test]"
],
"dependsOn": "Create virtual environment",
"dependsOrder": "sequence",
"group": {
"kind": "build",
"isDefault": true
},
"presentation": {
"echo": true,
"reveal": "always",
"focus": false,
"panel": "shared",
"showReuseMessage": true,
"clear": false
}
}
]
}
```
The fastest way to build InvokeAI now is ++cmd+shift+b++
## launch.json
This file defines debugger configurations, so that you can launch and monitor
the application with one click, set breakpoints to inspect specific states,
and so on.
```json title=".vscode/launch.json"
{
"version": "0.2.0",
"configurations": [
{
"name": "invokeai web",
"type": "python",
"request": "launch",
"program": ".venv/bin/invokeai",
"justMyCode": true
},
{
"name": "invokeai cli",
"type": "python",
"request": "launch",
"program": ".venv/bin/invokeai",
"justMyCode": true
},
{
"name": "mkdocs serve",
"type": "python",
"request": "launch",
"program": ".venv/bin/mkdocs",
"args": ["serve"],
"justMyCode": true
}
]
}
```
Then you only need to hit ++f5++ and the fun begins :nerd: (It is assumed that
you have created a virtual environment via the [tasks](#tasksjson) from the
previous step.)
## extensions.json
A list of recommended vscode-extensions to make your life easier:
```json title=".vscode/extensions.json"
{
"recommendations": [
"editorconfig.editorconfig",
"github.vscode-pull-request-github",
"ms-python.black-formatter",
"ms-python.flake8",
"ms-python.isort",
"ms-python.python",
"ms-python.vscode-pylance",
"redhat.vscode-yaml",
"tamasfe.even-better-toml",
"eamodio.gitlens",
"foxundermoon.shell-format",
"timonwong.shellcheck",
"esbenp.prettier-vscode",
"davidanson.vscode-markdownlint",
"yzhang.markdown-all-in-one",
"bierner.github-markdown-preview",
"ms-azuretools.vscode-docker",
"mads-hartmann.bash-ide-vscode"
]
}
```
## settings.json
With the settings below, your files get formatted when you save them (only
your modifications, where supported), which helps you avoid trouble with the
pre-commit hooks. If the hooks fail, they will prevent you from committing,
but most hooks directly write a fixed version, so that you just need to stage
and commit the changes:
```json title=".vscode/settings.json"
{
"[json]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.quickSuggestions": {
"comments": false,
"strings": true,
"other": true
},
"editor.suggest.insertMode": "replace",
"gitlens.codeLens.scopes": ["document"]
},
"[jsonc]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "modificationsIfAvailable"
},
"[python]": {
"editor.defaultFormatter": "ms-python.black-formatter",
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "file"
},
"[toml]": {
"editor.defaultFormatter": "tamasfe.even-better-toml",
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "modificationsIfAvailable"
},
"[yaml]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "modificationsIfAvailable"
},
"[markdown]": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.rulers": [80],
"editor.unicodeHighlight.ambiguousCharacters": false,
"editor.unicodeHighlight.invisibleCharacters": false,
"diffEditor.ignoreTrimWhitespace": false,
"editor.wordWrap": "on",
"editor.quickSuggestions": {
"comments": "off",
"strings": "off",
"other": "off"
},
"editor.formatOnSave": true,
"editor.formatOnSaveMode": "modificationsIfAvailable"
},
"[shellscript]": {
"editor.defaultFormatter": "foxundermoon.shell-format"
},
"[ignore]": {
"editor.defaultFormatter": "foxundermoon.shell-format"
},
"editor.rulers": [88],
"evenBetterToml.formatter.alignEntries": false,
"evenBetterToml.formatter.allowedBlankLines": 1,
"evenBetterToml.formatter.arrayAutoExpand": true,
"evenBetterToml.formatter.arrayTrailingComma": true,
"evenBetterToml.formatter.arrayAutoCollapse": true,
"evenBetterToml.formatter.columnWidth": 88,
"evenBetterToml.formatter.compactArrays": true,
"evenBetterToml.formatter.compactInlineTables": true,
"evenBetterToml.formatter.indentEntries": false,
"evenBetterToml.formatter.inlineTableExpand": true,
"evenBetterToml.formatter.reorderArrays": true,
"evenBetterToml.formatter.reorderKeys": true,
"evenBetterToml.formatter.compactEntries": false,
"evenBetterToml.schema.enabled": true,
"python.analysis.typeCheckingMode": "basic",
"python.formatting.provider": "black",
"python.languageServer": "Pylance",
"python.linting.enabled": true,
"python.linting.flake8Enabled": true,
"python.testing.unittestEnabled": false,
"python.testing.pytestEnabled": true,
"python.testing.pytestArgs": [
"tests",
"--cov=ldm",
"--cov-branch",
"--cov-report=term:skip-covered"
],
"yaml.schemas": {
"https://json.schemastore.org/prettierrc.json": "${workspaceFolder}/.prettierrc.yaml"
}
}
```

docs/help/contributing/010_PULL_REQUEST.md

@@ -0,0 +1,135 @@
---
title: Pull-Request
---
# :octicons-git-pull-request-16: Pull-Request
## Prerequisites

To follow the steps in this tutorial you will need:

- A [GitHub](https://github.com) account
- [git](https://git-scm.com/downloads) source control
- A text / code editor (personally I prefer
  [Visual Studio Code](https://code.visualstudio.com/Download))
- A terminal:
    - If you are on Linux/macOS you can use bash or zsh
    - For Windows users the commands are written for PowerShell
## Fork Repository
The first step if you want to contribute to InvokeAI is to fork the
repository.

Since you are already reading this doc, the easiest way to do so is by clicking
[here](https://github.com/invoke-ai/InvokeAI/fork). You could also open
[InvokeAI](https://github.com/invoke-ai/InvokeAI) and click on the "Fork"
button in the top right.
## Clone your fork
After you have forked the repository, clone it to your dev machine:
=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"
``` sh
git clone https://github.com/<github username>/InvokeAI \
&& cd InvokeAI
```
=== ":fontawesome-brands-windows:Windows"
``` powershell
git clone https://github.com/<github username>/InvokeAI `
&& cd InvokeAI
```
## Install in Editable Mode
To install InvokeAI in editable mode, we recommend (as always) creating and
activating a venv first. Afterwards you can install the InvokeAI package,
including the dev and docs extras, in editable mode, followed by installing
the pre-commit hooks:
=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"
``` sh
python -m venv .venv \
--prompt InvokeAI \
--upgrade-deps \
&& source .venv/bin/activate \
&& pip install \
--upgrade-deps \
--use-pep517 \
--editable=".[dev,docs]" \
&& pre-commit install
```
=== ":fontawesome-brands-windows:Windows"
``` powershell
python -m venv .venv `
--prompt InvokeAI `
--upgrade-deps `
&& .venv/scripts/activate.ps1 `
&& pip install `
--upgrade `
--use-pep517 `
--editable=".[dev,docs]" `
&& pre-commit install
```
## Create a branch
Make sure you are on the main branch, then create your feature branch from
there:
=== ":fontawesome-brands-linux:Linux / :simple-apple:macOS"
``` sh
git checkout main \
&& git pull \
&& git checkout -B <branch name>
```
=== ":fontawesome-brands-windows:Windows"
``` powershell
git checkout main `
&& git pull `
&& git checkout -B <branch name>
```
## Commit your changes
When you are done with adding / updating content, you need to commit those
changes to your repository before you can actually open a PR:
```{ .sh .annotate }
git add <files you have changed> # (1)!
git commit -m "A commit message which describes your change"
git push
```
1. Replace this with a space separated list of the files you changed, like:
   `README.md foo.sh bar.json baz`
## Create a Pull Request
After pushing your changes, you are ready to create a Pull Request. Just head
over to your fork on [GitHub](https://github.com), which should already show a
message that there have been recent changes on your feature branch, along with
a green button to create the PR.

The default target for your PRs is the main branch of
[invoke-ai/InvokeAI](https://github.com/invoke-ai/InvokeAI)
Another way would be to create it in VS-Code or via the GitHub CLI (or even via
the GitHub CLI in a VS-Code Terminal Window 🤭):
```sh
gh pr create
```
The CLI will inform you if there are still unpushed commits on your branch. It
will also prompt you for things like the Title and the Body (Description) if
you did not already pass them as arguments.
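For illustration, a non-interactive variant might look like this; the title
and body values are placeholders, not part of this commit:

```sh
# --title and --body skip the interactive prompts
gh pr create --title "docs: fix typos" --body "Corrects typos in the contributing docs"
```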


@@ -0,0 +1,26 @@
---
title: Issues
---
# :octicons-issue-opened-16: Issues
## :fontawesome-solid-bug: Report a bug
If you stumbled over a bug while using InvokeAI, we would appreciate it a lot
if you
[open an issue](https://github.com/invoke-ai/InvokeAI/issues/new?assignees=&labels=bug&template=BUG_REPORT.yml&title=%5Bbug%5D%3A+)
to inform us about the details so that our developers can look into it.

If you also know how to fix the bug, take a look [here](010_PULL_REQUEST.md)
to find out how to create a Pull Request.
## Request a feature

If you have an idea for a new feature which you would like to see in InvokeAI,
there is a
[feature request](https://github.com/invoke-ai/InvokeAI/issues/new?assignees=&labels=bug&template=BUG_REPORT.yml&title=%5Bbug%5D%3A+)
template available in the issues section of the repository.

If you are curious which features have already been requested, you can find an
overview of open requests
[here](https://github.com/invoke-ai/InvokeAI/labels/enhancement).

docs/help/contributing/030_DOCS.md

@@ -0,0 +1,32 @@
---
title: docs
---
# :simple-readthedocs: MkDocs-Material
If you want to contribute to the docs, there is an easy way to verify the
results of your changes before committing them.

Just follow the steps in the [Pull-Request](010_PULL_REQUEST.md) docs, where we
already
[create a venv and install the docs extras](010_PULL_REQUEST.md#install-in-editable-mode).
Once installed, it's as simple as:
```sh
mkdocs serve
```
This will build the docs locally and serve them on localhost; auto-refresh is
included, so you can just update a doc, save it, and switch to the browser
without needing to restart `mkdocs serve`.
More information about the "mkdocs flavored markdown syntax" can be found
[here](https://squidfunk.github.io/mkdocs-material/reference/).
## :material-microsoft-visual-studio-code:VS-Code
We also provide a
[launch configuration for VS-Code](../IDE-Settings/vs-code.md#launchjson) which
includes a `mkdocs serve` entrypoint as well. You also don't have to worry about
the formatting since this is automated via prettier, but this is of course not
limited to VS-Code.


@@ -0,0 +1,76 @@
# Transformation to nodes
## Current state
```mermaid
flowchart TD
web[WebUI];
cli[CLI];
web --> |img2img| generate(generate);
web --> |txt2img| generate(generate);
cli --> |txt2img| generate(generate);
cli --> |img2img| generate(generate);
generate --> model_manager;
generate --> generators;
generate --> ti_manager[TI Manager];
generate --> etc;
```
## Transitional Architecture
### first step
```mermaid
flowchart TD
web[WebUI];
cli[CLI];
web --> |img2img| img2img_node(Img2img node);
web --> |txt2img| generate(generate);
img2img_node --> model_manager;
img2img_node --> generators;
cli --> |txt2img| generate;
cli --> |img2img| generate;
generate --> model_manager;
generate --> generators;
generate --> ti_manager[TI Manager];
generate --> etc;
```
### second step
```mermaid
flowchart TD
web[WebUI];
cli[CLI];
web --> |img2img| img2img_node(img2img node);
img2img_node --> model_manager;
img2img_node --> generators;
web --> |txt2img| txt2img_node(txt2img node);
cli --> |txt2img| txt2img_node;
cli --> |img2img| generate(generate);
generate --> model_manager;
generate --> generators;
generate --> ti_manager[TI Manager];
generate --> etc;
txt2img_node --> model_manager;
txt2img_node --> generators;
txt2img_node --> ti_manager[TI Manager];
```
## Final Architecture
```mermaid
flowchart TD
web[WebUI];
cli[CLI];
web --> |img2img|img2img_node(img2img node);
cli --> |img2img|img2img_node;
web --> |txt2img|txt2img_node(txt2img node);
cli --> |txt2img|txt2img_node;
img2img_node --> model_manager;
txt2img_node --> model_manager;
img2img_node --> generators;
txt2img_node --> generators;
img2img_node --> ti_manager[TI Manager];
txt2img_node --> ti_manager[TI Manager];
```

docs/help/contributing/index.md

@@ -0,0 +1,16 @@
---
title: Contributing
---
# :fontawesome-solid-code-commit: Contributing
There are different ways you can contribute to
[InvokeAI](https://github.com/invoke-ai/InvokeAI), like translations, or
opening issues for bugs and ideas for improvement.

This section of the docs explains some of these ways, to make contributing
easier for newcomers as well as advanced users :nerd:
If you want to contribute code, but you do not have an exact idea yet, take a
look at the currently open
[:fontawesome-solid-bug: Bug Reports](https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen+label%3Abug)

docs/help/index.md Normal file

@@ -0,0 +1,12 @@
# :material-help:Help
If you are looking for help with the installation of InvokeAI, please take a
look into the [Installation](../installation/index.md) section of the docs.

Here you will find help on topics like

- how to contribute
- configuration recommendations for IDEs

If you have an idea about what's missing and aren't afraid of contributing,
just take a look at [DOCS](./contributing/030_DOCS.md) to find out how to do
so.

docs/index.md

@@ -2,6 +2,8 @@
title: Home
---

# :octicons-home-16: Home

<!--
The Docs you find here (/docs/*) are built and deployed via mkdocs. If you want to run a local version to verify your changes, it's as simple as::
@@ -103,8 +105,8 @@ images in full-precision mode:

### :fontawesome-solid-memory: Memory and Disk

- At least 12 GB Main Memory RAM.
- At least 18 GB of free disk space for the machine learning model, Python,
  and all its dependencies.

## :octicons-package-dependencies-24: Installation

@@ -113,13 +115,21 @@ either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
driver).

### [Installation Getting Started Guide](installation)

#### [Automated Installer](installation/010_INSTALL_AUTOMATED.md)

This method is recommended for 1st time users

#### [Manual Installation](installation/020_INSTALL_MANUAL.md)

This method is recommended for experienced users and developers

#### [Docker Installation](installation/040_INSTALL_DOCKER.md)

This method is recommended for those familiar with running Docker containers

### Other Installation Guides

- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
- [XFormers](installation/070_INSTALL_XFORMERS.md)
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)

@@ -128,14 +138,19 @@ This method is recommended for those familiar with running Docker containers

## :octicons-gift-24: InvokeAI Features

### The InvokeAI Web Interface

- [WebUI overview](features/WEB.md)
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
- [WebUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)

<!-- separator -->

### The InvokeAI Command Line Interface

- [Command Line Interface Reference Guide](features/CLI.md)

<!-- separator -->

### Image Management

- [Image2Image](features/IMG2IMG.md)
- [Inpainting](features/INPAINTING.md)
- [Outpainting](features/OUTPAINTING.md)

@@ -145,14 +160,18 @@ This method is recommended for those familiar with running Docker containers

- [Other Features](features/OTHER.md)

<!-- separator -->

### Model Management

- [Installing](installation/050_INSTALLING_MODELS.md)
- [Model Merging](features/MODEL_MERGING.md)
- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
- [Textual Inversion](features/TEXTUAL_INVERSION.md)
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)

<!-- separator -->

### Prompt Engineering

- [Prompt Syntax](features/PROMPTS.md)
- [Generating Variations](features/VARIATIONS.md)
@@ -162,84 +181,188 @@ This method is recommended for those familiar with running Docker containers

#### Migration to Stable Diffusion `diffusers` models
Previous versions of InvokeAI supported the original model file format
introduced with Stable Diffusion 1.4. In the original format, known variously
as "checkpoint", or "legacy" format, there is a single large weights file
ending with `.ckpt` or `.safetensors`. Though this format has served the
community well, it has a number of disadvantages, including file size, slow
loading times, and a variety of non-standard variants that require
special-case code to handle. In addition, because checkpoint files are
actually a bundle of multiple machine learning sub-models, it is hard to swap
different sub-models in and out, or to share common sub-models. A new format,
introduced by the StabilityAI company in collaboration with HuggingFace, is
called `diffusers` and consists of a directory of individual models. The most
immediate benefit of `diffusers` is that they load from disk very quickly. A
longer term benefit is that in the near future `diffusers` models will be able
to share common sub-models, dramatically reducing disk space when you have
multiple fine-tune models derived from the same base.
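As a rough sketch of the difference: a checkpoint is a single
`.ckpt`/`.safetensors` file, while a `diffusers` model is a directory of
sub-models, typically along these lines (the path is illustrative, and the
exact sub-directories vary by pipeline):

```sh
# Peek inside a diffusers model directory (path is a placeholder)
ls /path/to/stable-diffusion-1.5/
# typically: model_index.json  scheduler/  text_encoder/  tokenizer/  unet/  vae/
```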
When you perform a new install of version 2.3.0, you will be offered the
option to install the `diffusers` versions of a number of popular SD models,
including Stable Diffusion versions 1.5 and 2.1 (including the 768x768 pixel
version of 2.1). These will act and work just like the checkpoint versions. Do
not be concerned if you already have a lot of ".ckpt" or ".safetensors" models
on disk! InvokeAI 2.3.0 can still load these and generate images from them
without any extra intervention on your part.
To take advantage of the optimized loading times of `diffusers` models,
InvokeAI offers options to convert legacy checkpoint models into optimized
`diffusers` models. If you use the `invokeai` command line interface, the
relevant commands are listed below (a sample session follows the list):
- `!convert_model` -- Take the path to a local checkpoint file or a URL that
  is pointing to one, convert it into a `diffusers` model, and import it into
  InvokeAI's models registry file.
- `!optimize_model` -- If you already have a checkpoint model in your InvokeAI
  models file, this command will accept its short name and convert it into a
  like-named `diffusers` model, optionally deleting the original checkpoint
  file.
- `!import_model` -- Take the local path of either a checkpoint file or a
  `diffusers` model directory and import it into InvokeAI's registry file. You
  may also provide the ID of any diffusers model that has been published on
  the
  [HuggingFace models repository](https://huggingface.co/models?pipeline_tag=text-to-image&sort=downloads)
  and it will be downloaded and installed automatically.
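A hypothetical session with these commands, with placeholder paths and model
names:

```sh
# Inside the interactive invokeai CLI
invoke> !convert_model /path/to/some-model.ckpt
invoke> !optimize_model some-model
invoke> !import_model /path/to/a-diffusers-model-directory
```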
The WebGUI offers similar functionality for model management.
For advanced users, new command-line options provide additional functionality.
Launching `invokeai` with the argument `--autoconvert <path to directory>`
takes the path to a directory of checkpoint files, automatically converts them
into `diffusers` models and imports them. Each time the script is launched,
the directory will be scanned for new checkpoint files to be loaded.
Alternatively, the `--ckpt_convert` argument will cause any checkpoint or
safetensors model that is already registered with InvokeAI to be converted
into a `diffusers` model on the fly, allowing you to take advantage of future
diffusers-only features without explicitly converting the model and saving it
to disk.
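For example (the directory path is a placeholder):

```sh
# Scan a directory for checkpoint files at startup and convert/import them
invokeai --autoconvert /path/to/checkpoint-directory

# Or: convert registered checkpoint models on the fly at load time
invokeai --ckpt_convert
```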
Please see
[INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/)
for more information on model management in both the command-line and Web
interfaces.
#### Support for the `XFormers` Memory-Efficient Crossattention Package
On CUDA (Nvidia) systems, version 2.3.0 supports the `XFormers` library. Once
installed, the `xformers` package dramatically reduces the memory footprint of
loaded Stable Diffusion model files and modestly increases image generation
speed. `xformers` will be installed and activated automatically if you specify
a CUDA system at install time.
The caveat with using `xformers` is that it introduces slightly
non-deterministic behavior, and images generated using the same seed and other
settings will be subtly different between invocations. Generally the changes
are unnoticeable unless you rapidly shift back and forth between images, but
to disable `xformers` and restore fully deterministic behavior, you may launch
InvokeAI using the `--no-xformers` option. This is most conveniently done by
opening the file `invokeai/invokeai.init` with a text editor, and adding the
line `--no-xformers` at the bottom.
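A sketch of that edit from the shell, assuming the `invokeai/invokeai.init`
path mentioned above:

```sh
# Append the option so every launch disables xformers
echo "--no-xformers" >> invokeai/invokeai.init
```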
#### A Negative Prompt Box in the WebUI

There is now a separate text input box for negative prompts in the WebUI. This
is convenient for stashing frequently-used negative prompts ("mangled limbs,
bad anatomy"). The `[negative prompt]` syntax continues to work in the main
prompt box as well.
To see exactly how your prompts are being parsed, launch `invokeai` with the
`--log_tokenization` option. The console window will then display the
tokenization process for both positive and negative prompts.
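For example:

```sh
# Show how the positive and negative prompts are tokenized
invokeai --log_tokenization
```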
#### Model Merging

Version 2.3.0 offers an intuitive user interface for merging up to three
Stable Diffusion models. Model merging allows you to mix the behavior of
models to achieve very interesting effects. To use this, each of the models
must already be imported into InvokeAI and saved in `diffusers` format, then
launch the merger using a new menu item in the InvokeAI launcher script
(`invoke.sh`, `invoke.bat`) or directly from the command line with
`invokeai-merge --gui`. You will be prompted to select the models to merge,
the proportions in which to mix them, and the mixing algorithm. The script
will create a new merged `diffusers` model and import it into InvokeAI for
your use.
See
[MODEL MERGING](https://invoke-ai.github.io/InvokeAI/features/MODEL_MERGING/)
for more details.
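A minimal launch, as described above:

```sh
# Open the model merger with its graphical front end
invokeai-merge --gui
```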
#### Textual Inversion Training

Textual Inversion (TI) is a technique for training a Stable Diffusion model to
emit a particular subject or style when triggered by a keyword phrase. You can
perform TI training by placing a small number of images of the subject or
style in a directory, and choosing a distinctive trigger phrase, such as
"pointillist-style". After successful training, the subject or style will be
activated by including `<pointillist-style>` in your prompt.
Previous versions of InvokeAI were able to perform TI, but it required using a
command-line script with dozens of obscure command-line arguments. Version
2.3.0 features an intuitive TI frontend that will build a TI model on top of
any `diffusers` model. To access training you can launch from a new item in
the launcher script or from the command line using `invokeai-ti --gui`.
See
[TEXTUAL INVERSION](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/)
for further details.
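Likewise, a minimal launch of the training front end:

```sh
# Open the textual inversion trainer with its graphical front end
invokeai-ti --gui
```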
#### A New Installer Experience

The InvokeAI installer has been upgraded in order to provide a smoother and
hopefully more glitch-free experience. In addition, InvokeAI is now packaged
as a PyPi project, allowing developers and power-users to install InvokeAI
with the command `pip install InvokeAI --use-pep517`. Please see
[Installation](#installation) for details.
Developers should be aware that the `pip` installation procedure has been
simplified and that the `conda` method is no longer supported at all.
Accordingly, the `environments_and_requirements` directory has been deleted
from the repository.
#### Command-line name changes

All of InvokeAI's functionality, including the WebUI, command-line interface,
textual inversion training and model merging, can be accessed from the
`invoke.sh` and `invoke.bat` launcher scripts. The menu of options has been
expanded to add the new functionality. For the convenience of developers and
power users, we have normalized the names of the InvokeAI command-line
scripts:
- `invokeai` -- Command-line client
- `invokeai --web` -- Web GUI
- `invokeai-merge --gui` -- Model merging script with graphical front end
- `invokeai-ti --gui` -- Textual inversion script with graphical front end
- `invokeai-configure` -- Configuration tool for initializing the `invokeai`
  directory and selecting popular starter models.
For backward compatibility, the old command names are also recognized,
including `invoke.py` and `configure-invokeai.py`. However, these are
deprecated and will eventually be removed.
Developers should be aware that the locations of the scripts' source code have
been moved. The new locations are:

- `invokeai` => `ldm/invoke/CLI.py`
- `invokeai-configure` => `ldm/invoke/config/configure_invokeai.py`
- `invokeai-ti` => `ldm/invoke/training/textual_inversion.py`
- `invokeai-merge` => `ldm/invoke/merge_diffusers`
Please see [2.3.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.3.0) for further details. Developers are strongly encouraged to perform an "editable" install of InvokeAI
For older changelogs, please visit the using `pip install -e . --use-pep517` in the Git repository, and then to call
the scripts using their 2.3.0 names, rather than executing the scripts directly.
Developers should also be aware that the several important data files have been
relocated into a new directory named `invokeai`. This includes the WebGUI's
`frontend` and `backend` directories, and the `INITIAL_MODELS.yaml` files used
by the installer to select starter models. Eventually all InvokeAI modules will
be in subdirectories of `invokeai`.
Please see
[2.3.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.3.0)
for further details. For older changelogs, please visit the
**[CHANGELOG](CHANGELOG/#v223-2-december-2022)**. **[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.
## :material-target: Troubleshooting

Please check out our
**[:material-frequently-asked-questions: Troubleshooting Guide](installation/010_INSTALL_AUTOMATED.md#troubleshooting)**
to get solutions for common installation problems and other issues.
## :octicons-repo-push-24: Contributing

@@ -265,8 +388,8 @@ thank them for their time, hard work and effort.

For support, please use this repository's GitHub Issues tracking service. Feel
free to send me an email if you use and like the script.

Original portions of the software are Copyright (c) 2022-23 by
[The InvokeAI Team](https://github.com/invoke-ai).
## :octicons-book-24: Further Reading

docs/requirements-mkdocs.txt (deleted)

@@ -1,5 +0,0 @@
mkdocs
mkdocs-material>=8, <9
mkdocs-git-revision-date-localized-plugin
mkdocs-redirects==1.2.0

mkdocs.yml

@@ -2,14 +2,14 @@
# General
site_name: InvokeAI Stable Diffusion Toolkit Docs
site_url: !ENV [SITE_URL, 'https://invoke-ai.github.io/InvokeAI']
site_author: mauwii
dev_addr: '127.0.0.1:8080'

# Repository
repo_name: !ENV [REPO_NAME, 'invoke-ai/InvokeAI']
repo_url: !ENV [REPO_URL, 'https://github.com/invoke-ai/InvokeAI']
edit_uri: blob/main/docs/

# Copyright
copyright: Copyright &copy; 2022 InvokeAI Team
@@ -19,7 +19,8 @@ theme:
  name: material
  icon:
    repo: fontawesome/brands/github
    edit: material/pencil
    view: material/eye
  palette:
    - media: '(prefers-color-scheme: light)'
      scheme: default
@@ -33,6 +34,11 @@ theme:
        icon: material/lightbulb-outline
        name: Switch to light mode
  features:
    - content.action.edit
    - content.action.view
    - content.code.copy
    - content.tabs.link
    - navigation.indexes
    - navigation.instant
    - navigation.tabs
    - navigation.top
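A note on the `!ENV` entries above: mkdocs resolves them from environment
variables at build time, falling back to the bracketed default. A sketch of
overriding them locally, mirroring the variables set in the GitHub workflow
earlier in this commit (the username is a placeholder):

```sh
# Build the docs for a fork's GitHub Pages site
export SITE_URL="https://<github username>.github.io/InvokeAI"
export REPO_NAME="<github username>/InvokeAI"
export REPO_URL="https://github.com/<github username>/InvokeAI"
mkdocs build
```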

pyproject.toml

@@ -1,43 +1,37 @@
[build-system]
build-backend = "setuptools.build_meta"
requires = ["setuptools ~= 67.1", "wheel"]

[project]
authors = [{name = "The InvokeAI Project", email = "lincoln.stein@gmail.com"}]
classifiers = [
  "Development Status :: 4 - Beta",
  "Environment :: GPU :: NVIDIA CUDA",
  "Environment :: GPU",
  "Environment :: MacOS X",
  "Intended Audience :: Developers",
  "Intended Audience :: End Users/Desktop",
  "License :: OSI Approved :: MIT License",
  "Operating System :: MacOS",
  "Operating System :: Microsoft :: Windows",
  "Operating System :: POSIX :: Linux",
  "Programming Language :: Python :: 3 :: Only",
  "Programming Language :: Python :: 3",
  "Programming Language :: Python :: 3.10",
  "Programming Language :: Python :: 3.9",
  "Programming Language :: Python",
  "Topic :: Artistic Software",
  "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
  "Topic :: Internet :: WWW/HTTP :: WSGI :: Server",
  "Topic :: Multimedia :: Graphics",
  "Topic :: Scientific/Engineering :: Artificial Intelligence",
  "Topic :: Scientific/Engineering :: Image Processing",
]
dependencies = [
  "accelerate",
  "albumentations",
  "click",
  "clip_anytorch",
  "compel==0.1.7",
  "datasets",
  "diffusers[torch]~=0.13",
@@ -54,7 +48,7 @@ dependencies = [
  "huggingface-hub>=0.11.1",
  "imageio",
  "imageio-ffmpeg",
  "k-diffusion",
  "kornia",
  "npyscreen",
  "numpy<1.24",
@@ -62,8 +56,8 @@ dependencies = [
  "opencv-python",
  "picklescan",
  "pillow",
  "prompt-toolkit",
  "pudb",
  "pypatchmatch",
  "pyreadline3",
  "pytorch-lightning==1.7.7",
@@ -75,62 +69,116 @@ dependencies = [
  "streamlit",
  "taming-transformers-rom1504",
  "test-tube>=0.7.5",
  "torch-fidelity",
  "torch>=1.13.1",
  "torchmetrics",
  "torchvision>=0.14.1",
  "transformers~=4.25",
  "windows-curses; sys_platform=='win32'",
]
description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process"
dynamic = ["version"]
keywords = ["AI", "stable-diffusion"]
license = {text = "MIT"}
name = "InvokeAI"
readme = {content-type = "text/markdown", file = "README.md"}
requires-python = ">=3.9, <3.11"

[project.optional-dependencies]
"dev" = [
  "black[jupyter]",
  "flake8",
  "flake8-black",
  "flake8-bugbear",
  "isort",
  "pre-commit",
]
"dist" = ["pip-tools", "pipdeptree", "twine"]
"docs" = [
  "mkdocs-git-revision-date-localized-plugin",
  "mkdocs-material==9.*",
  "mkdocs-redirects==1.2.0",
]
"test" = ["pytest-cov", "pytest>6.0.0"]
"xformers" = [
  "triton; sys_platform=='linux'",
  "xformers~=0.0.16; sys_platform!='darwin'",
]

[project.scripts]
# legacy entrypoints; provided for backwards compatibility
"configure_invokeai.py" = "ldm.invoke.config.invokeai_configure:main"
"invoke.py" = "ldm.invoke.CLI:main"
"merge_embeddings.py" = "ldm.invoke.merge_diffusers:main"
"textual_inversion.py" = "ldm.invoke.training.textual_inversion:main"

# modern entrypoints
"invokeai" = "ldm.invoke.CLI:main"
"invokeai-configure" = "ldm.invoke.config.invokeai_configure:main"
"invokeai-merge" = "ldm.invoke.merge_diffusers:main"
"invokeai-ti" = "ldm.invoke.training.textual_inversion:main"
"invokeai-model-install" = "ldm.invoke.config.model_install:main"
"invokeai-update" = "ldm.invoke.config.invokeai_update:main"

[project.urls]
"Bug Reports" = "https://github.com/invoke-ai/InvokeAI/issues"
"Discord" = "https://discord.gg/ZmtBAhwWhy"
"Documentation" = "https://invoke-ai.github.io/InvokeAI/"
"Homepage" = "https://invoke-ai.github.io/InvokeAI/"
"Source" = "https://github.com/invoke-ai/InvokeAI/"

[tool.setuptools]
license-files = ["LICENSE"]

[tool.setuptools.dynamic]
version = {attr = "ldm.invoke.__version__"}

[tool.setuptools.packages.find]
"include" = [
  "invokeai.assets.web",
  "invokeai.backend*",
  "invokeai.configs*",
  "invokeai.frontend.dist*",
  "ldm*",
]
"where" = ["."]

[tool.setuptools.package-data]
"invokeai.assets.web" = ["**.png"]
"invokeai.configs" = ["**.example", "**.txt", "**.yaml"]
"invokeai.frontend.dist" = ["**"]
[tool.black]
extend-exclude = '''
/(
# skip legacy scripts
| scripts/orig_scripts
)/
'''
line-length = 88
target-version = ['py39']
[tool.isort]
atomic = true
extend_skip_glob = ["scripts/orig_scripts/*"]
filter_files = true
line_length = 120
profile = "black"
py_version = 39
remove_redundant_aliases = true
skip_gitignore = true
src_paths = ["installer", "invokeai", "ldm", "tests"]
virtual_env = ".venv"
[tool.coverage.run]
branch = true
parallel = true
[tool.coverage.report]
skip_covered = true
skip_empty = true
[tool.coverage.paths]
source = ["invokeai/backend", "ldm/invoke"]
[tool.pytest.ini_options]
addopts = ["--cov=invokeai/backend", "--cov=ldm/invoke"]