Merge branch 'main' of https://github.com/ParisNeo/ArtBot
.github/CODEOWNERS
@@ -1,7 +1,50 @@
ldm/invoke/pngwriter.py @CapableWeb
ldm/invoke/server_legacy.py @CapableWeb
scripts/legacy_api.py @CapableWeb
tests/legacy_tests.sh @CapableWeb
installer/ @ebr
.github/workflows/ @mauwii
docker/ @mauwii

# continuous integration
/.github/workflows/ @mauwii

# documentation
/docs/ @lstein @mauwii @tildebyte
mkdocs.yml @lstein @mauwii

# installation and configuration
/pyproject.toml @mauwii @lstein @ebr
/docker/ @mauwii
/scripts/ @ebr @lstein
/installer/ @ebr @lstein @tildebyte
ldm/invoke/config @lstein @ebr
invokeai/assets @lstein @ebr
invokeai/configs @lstein @ebr
/ldm/invoke/_version.py @lstein @blessedcoolant

# web ui
/invokeai/frontend @blessedcoolant @psychedelicious
/invokeai/backend @blessedcoolant @psychedelicious

# generation and model management
/ldm/*.py @lstein
/ldm/generate.py @lstein @keturn
/ldm/invoke/args.py @lstein @blessedcoolant
/ldm/invoke/ckpt* @lstein
/ldm/invoke/ckpt_generator @lstein
/ldm/invoke/CLI.py @lstein
/ldm/invoke/config @lstein @ebr @mauwii
/ldm/invoke/generator @keturn @damian0815
/ldm/invoke/globals.py @lstein @blessedcoolant
/ldm/invoke/merge_diffusers.py @lstein
/ldm/invoke/model_manager.py @lstein @blessedcoolant
/ldm/invoke/txt2mask.py @lstein
/ldm/invoke/patchmatch.py @Kyle0654
/ldm/invoke/restoration @lstein @blessedcoolant

# attention, textual inversion, model configuration
/ldm/models @damian0815 @keturn
/ldm/modules @damian0815 @keturn

# Nodes
apps/ @Kyle0654

# legacy REST API
# is CapableWeb still engaged?
/ldm/invoke/pngwriter.py @CapableWeb
/ldm/invoke/server_legacy.py @CapableWeb
/scripts/legacy_api.py @CapableWeb
/tests/legacy_tests.sh @CapableWeb
@@ -92,6 +92,7 @@ You will need one of the following:

- An NVIDIA-based graphics card with 4 GB or more of VRAM.
- An Apple computer with an M1 chip.
- An AMD-based graphics card with 4 GB or more of VRAM (Linux only).

We do not recommend the GTX 1650 or 1660 series video cards. They are
unable to run in half-precision mode and do not have sufficient VRAM
@@ -261,7 +261,7 @@ sections describe what's new for InvokeAI.

  [Installation](installation/index.md).
- A streamlined manual installation process that works for both Conda and
  PIP-only installs. See
  [Manual Installation](installation/INSTALL_MANUAL.md).
  [Manual Installation](installation/020_INSTALL_MANUAL.md).
- The ability to save frequently-used startup options (model to load, steps,
  sampler, etc.) in a `.invokeai` file, as sketched below. See
  [Client](features/CLI.md)
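
A minimal sketch of what such a `.invokeai` file might contain; the option values below are illustrative, not defaults:

```bash
# frequently-used startup options, one per line (values illustrative)
--outdir=/home/fred/invokeai/outputs
--steps=30
--sampler=k_lms
```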

BIN docs/assets/installer-walkthrough/choose-gpu.png (new file, 26 KiB)
BIN docs/assets/installer-walkthrough/confirm-directory.png (new file, 20 KiB)
BIN docs/assets/installer-walkthrough/downloading-models.png (new file, 37 KiB)
BIN docs/assets/installer-walkthrough/unpacked-zipfile.png (new file, 57 KiB)
BIN docs/assets/installing-models/webui-models-1.png (new file, 98 KiB)
BIN docs/assets/installing-models/webui-models-2.png (new file, 94 KiB)
BIN docs/assets/installing-models/webui-models-3.png (new file, 99 KiB)
BIN docs/assets/installing-models/webui-models-4.png (new file, 98 KiB)
@@ -6,38 +6,51 @@ title: Command-Line Interface

## **Interactive Command Line Interface**

The `invoke.py` script, located in `scripts/`, provides an interactive interface
to image generation similar to the "invoke mothership" bot that Stable AI
provided on its Discord server.
The InvokeAI command line interface (CLI) provides scriptable access
to InvokeAI's features. Some advanced features are only available
through the CLI, though they eventually find their way into the WebUI.

Unlike the `txt2img.py` and `img2img.py` scripts provided in the original
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion) source
code repository, the time-consuming initialization of the AI model
only happens once. After that, image generation from the
command-line interface is very fast.
The CLI is accessible from the `invoke.sh`/`invoke.bat` launcher by
selecting option (1). Alternatively, it can be launched directly from
the command line by activating the InvokeAI environment and giving the
command:

```bash
invokeai
```

After some startup messages, you will be presented with the `invoke> `
prompt. Here you can type prompts to generate images and issue other
commands to load and manipulate generative models. The CLI has a large
number of command-line options that control its behavior. To get a
concise summary of the options, call `invokeai` with the `--help` argument:

```bash
invokeai --help
```

The script uses the readline library to allow for in-line editing, command
history (++up++ and ++down++), autocompletion, and more. To help keep track of
which prompts generated which images, the script writes a log file of image
names and prompts to the selected output directory.

In addition, as of version 1.02, it also writes the prompt into the PNG file's
metadata, where it can be retrieved using `scripts/images2prompt.py`.

The script is confirmed to work on Linux, Windows and Mac systems.

!!! note

    This script runs from the command-line or can be used as a Web application. The Web GUI is
    currently rudimentary, but a much better replacement is on its way.

Here is a typical session:

```bash
(invokeai) ~/stable-diffusion$ python3 ./scripts/invoke.py
PS1:C:\Users\fred> invokeai
* Initializing, be patient...
Loading model from models/ldm/text2img-large/model.ckpt
* Initializing, be patient...
>> Initialization file /home/lstein/invokeai/invokeai.init found. Loading...
>> Internet connectivity is True
>> InvokeAI, version 2.3.0-rc5
>> InvokeAI runtime directory is "/home/lstein/invokeai"
>> GFPGAN Initialized
>> CodeFormer Initialized
>> ESRGAN Initialized
>> Using device_type cuda
>> xformers memory-efficient attention is available and enabled
(...more initialization messages...)

* Initialization done! Awaiting your command...
* Initialization done! Awaiting your command (-h for help, 'q' to quit)
invoke> ashley judd riding a camel -n2 -s150
Outputs:
outputs/img-samples/00009.png: "ashley judd riding a camel" -n2 -s150 -S 416354203
@@ -47,27 +60,15 @@ invoke> "there's a fly in my soup" -n6 -g
outputs/img-samples/00011.png: "there's a fly in my soup" -n6 -g -S 2685670268
seeds for individual rows: [2685670268, 1216708065, 2335773498, 822223658, 714542046, 3395302430]
invoke> q

# this shows how to retrieve the prompt stored in the saved image's metadata
(invokeai) ~/stable-diffusion$ python ./scripts/images2prompt.py outputs/img_samples/*.png
00009.png: "ashley judd riding a camel" -s150 -S 416354203
00010.png: "ashley judd riding a camel" -s150 -S 1362479620
00011.png: "there's a fly in my soup" -n6 -g -S 2685670268
```

![invoke-py-demo](../assets/dream-py-demo.png)

The `invoke>` prompt's arguments are pretty much identical to those used in the
Discord bot, except you don't need to type `!invoke` (it doesn't hurt if you
do). A significant change is that creation of individual images is now the
default unless `--grid` (`-g`) is given. A full list is given in
[List of prompt arguments](#list-of-prompt-arguments).

## Arguments

The script itself also recognizes a series of command-line switches that will
change important global defaults, such as the directory for image outputs and
the location of the model weight files.
The script recognizes a series of command-line switches that will
change important global defaults, such as the directory for image
outputs and the location of the model weight files.

### List of arguments recognized at the command line

@@ -82,10 +83,14 @@ overridden on a per-prompt basis (see

| `--outdir <path>` | `-o<path>` | `outputs/img_samples` | Location for generated images. |
| `--prompt_as_dir` | `-p` | `False` | Name output directories using the prompt text. |
| `--from_file <path>` | | `None` | Read list of prompts from a file. Use `-` to read from standard input |
| `--model <modelname>` | | `stable-diffusion-1.4` | Loads model specified in configs/models.yaml. Currently one of "stable-diffusion-1.4" or "laion400m" |
| `--full_precision` | `-F` | `False` | Run in slower full-precision mode. Needed for Macintosh M1/M2 hardware and some older video cards. |
| `--model <modelname>` | | `stable-diffusion-1.5` | Loads the initial model specified in configs/models.yaml. |
| `--ckpt_convert` | | `False` | If provided, both .ckpt and .safetensors files will be auto-converted into diffusers format in memory |
| `--autoconvert <path>` | | `None` | On startup, scan the indicated directory for new .ckpt/.safetensors files and automatically convert and import them |
| `--precision` | | `fp16` | Provide `fp32` for full precision mode, `fp16` for half-precision. `fp32` needed for Macintoshes and some NVidia cards. |
| `--png_compression <0-9>` | `-z<0-9>` | `6` | Select level of compression for output files, from 0 (no compression) to 9 (max compression) |
| `--safety-checker` | | `False` | Activate safety checker for NSFW and other potentially disturbing imagery |
| `--patchmatch`, `--no-patchmatch` | | `--patchmatch` | Load/Don't load the PatchMatch inpainting extension |
| `--xformers`, `--no-xformers` | | `--xformers` | Load/Don't load the Xformers memory-efficient attention module (CUDA only) |
| `--web` | | `False` | Start in web server mode |
| `--host <ip addr>` | | `localhost` | Which network interface web server should listen on. Set to 0.0.0.0 to listen on any. |
| `--port <port>` | | `9090` | Which port web server should listen for requests on. |
@@ -109,6 +114,7 @@ overridden on a per-prompt basis (see

| Argument | Shortcut | Default | Description |
|--------------------|------------|---------------------|--------------|
| `--full_precision` | | `False` | Same as `--precision=fp32`|
| `--weights <path>` | | `None` | Path to weights file; use `--model stable-diffusion-1.4` instead |
| `--laion400m` | `-l` | `False` | Use older LAION400m weights; use `--model=laion400m` instead |

@@ -336,8 +342,10 @@ useful for debugging the text masking process prior to inpainting with the

### Model selection and importation

The CLI allows you to add new models on the fly, as well as to switch among them
rapidly without leaving the script.
The CLI allows you to add new models on the fly, as well as to switch
among them rapidly without leaving the script. There are several
different model formats, each described in the [Model Installation
Guide](../installation/050_INSTALLING_MODELS.md).

#### `!models`

@@ -347,9 +355,9 @@ model is bold-faced

Example:

<pre>
laion400m not loaded <no description>
<b>stable-diffusion-1.4 active Stable Diffusion v1.4</b>
waifu-diffusion not loaded Waifu Diffusion v1.3
inpainting-1.5 not loaded Stable Diffusion inpainting model
<b>stable-diffusion-1.5 active Stable Diffusion v1.5</b>
waifu-diffusion not loaded Waifu Diffusion v1.4
</pre>

#### `!switch <model>`

@@ -361,43 +369,30 @@ Note how the second column of the `!models` table changes to `cached` after a
model is first loaded, and that the long initialization step is not needed when
loading a cached model.

<pre>
invoke> !models
laion400m not loaded <no description>
<b>stable-diffusion-1.4 cached Stable Diffusion v1.4</b>
waifu-diffusion active Waifu Diffusion v1.3
#### `!import_model <hugging_face_repo_ID>`

invoke> !switch waifu-diffusion
>> Caching model stable-diffusion-1.4 in system RAM
>> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
| LatentDiffusion: Running in eps-prediction mode
| DiffusionWrapper has 859.52 M params.
| Making attention of type 'vanilla' with 512 in_channels
| Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
| Making attention of type 'vanilla' with 512 in_channels
| Using faster float16 precision
>> Model loaded in 18.24s
>> Max VRAM used to load the model: 2.17G
>> Current VRAM usage: 2.17G
>> Setting Sampler to k_lms
This imports and installs a `diffusers`-style model that is stored on
the [HuggingFace Web Site](https://huggingface.co). You can look up
any [Stable Diffusion diffusers
model](https://huggingface.co/models?library=diffusers) and install it
with a command like the following:

invoke> !models
laion400m not loaded <no description>
stable-diffusion-1.4 cached Stable Diffusion v1.4
<b>waifu-diffusion active Waifu Diffusion v1.3</b>
```bash
!import_model prompthero/openjourney
```

invoke> !switch stable-diffusion-1.4
>> Caching model waifu-diffusion in system RAM
>> Retrieving model stable-diffusion-1.4 from system RAM cache
>> Setting Sampler to k_lms
#### `!import_model <path/to/diffusers/directory>`

invoke> !models
laion400m not loaded <no description>
<b>stable-diffusion-1.4 active Stable Diffusion v1.4</b>
waifu-diffusion cached Waifu Diffusion v1.3
</pre>
If you have a copy of a `diffusers`-style model saved to disk, you can
import it by passing the path to the model's top-level directory.
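
For instance, a minimal sketch; the directory path below is hypothetical:

```bash
invoke> !import_model /home/fred/models/my-diffusers-model/
```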

#### `!import_model <path/to/model/weights>`
#### `!import_model <url>`

For a `.ckpt` or `.safetensors` file, if you have a direct download
URL for the file, you can provide it to `!import_model` and the file
will be downloaded and installed for you.
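
For example, a sketch using a placeholder URL:

```bash
invoke> !import_model https://example.com/models/some-model.safetensors
```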

#### `!import_model <path/to/model/weights.ckpt>`

This command imports a new model weights file into InvokeAI, makes it available
for image generation within the script, and writes out the configuration for the
@@ -417,35 +412,12 @@ below, the bold-faced text shows what the user typed in with the exception of
the width, height and configuration file paths, which were filled in
automatically.

Example:
#### `!import_model <path/to/directory_of_models>`

<pre>
invoke> <b>!import_model models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt</b>
>> Model import in process. Please enter the values needed to configure this model:

Name for this model: <b>waifu-diffusion</b>
Description of this model: <b>Waifu Diffusion v1.3</b>
Configuration file for this model: <b>configs/stable-diffusion/v1-inference.yaml</b>
Default image width: <b>512</b>
Default image height: <b>512</b>
>> New configuration:
waifu-diffusion:
  config: configs/stable-diffusion/v1-inference.yaml
  description: Waifu Diffusion v1.3
  height: 512
  weights: models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
  width: 512
OK to import [n]? <b>y</b>
>> Caching model stable-diffusion-1.4 in system RAM
>> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
| LatentDiffusion: Running in eps-prediction mode
| DiffusionWrapper has 859.52 M params.
| Making attention of type 'vanilla' with 512 in_channels
| Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
| Making attention of type 'vanilla' with 512 in_channels
| Using faster float16 precision
invoke>
</pre>
If you provide the path of a directory that contains one or more
`.ckpt` or `.safetensors` files, the CLI will scan the directory and
interactively offer to import the models it finds there. Also see the
`--autoconvert` command-line option.
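
For example, a sketch pointing at a hypothetical directory of checkpoint files:

```bash
invoke> !import_model /home/fred/ckpt-models/
```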

#### `!edit_model <name_of_model>`

@@ -479,11 +451,6 @@ OK to import [n]? y
...
</pre>

```bash
invoke> !fix 000017.4829112.gfpgan-00.png --embiggen 3
...lots of text...
Outputs:
[2] outputs/img-samples/000018.2273800735.embiggen-00.png: !fix "outputs/img-samples/000017.243781548.gfpgan-00.png" -s 50 -S 2273800735 -W 512 -H 512 -C 7.5 -A k_lms --embiggen 3.0 0.75 0.25
```

### History processing

The CLI provides a series of convenient commands for reviewing previous actions,

@@ -4,13 +4,24 @@ title: Image-to-Image

# :material-image-multiple: Image-to-Image

## `img2img`
Both the Web and command-line interfaces provide an "img2img" feature
that lets you seed your creations with an initial drawing or
photo. This is a really cool feature that tells stable diffusion to
build the prompt on top of the image you provide, preserving the
original's basic shape and layout.

This script also provides an `img2img` feature that lets you seed your creations
with an initial drawing or photo. This is a really cool feature that tells
stable diffusion to build the prompt on top of the image you provide, preserving
the original's basic shape and layout. To use it, provide the `--init_img`
option as shown here:
See the [WebUI Guide](WEB.md) for a walkthrough of the img2img feature
in the InvokeAI web server. This document describes how to use img2img
in the command-line tool.

## Basic Usage

Launch the command-line client by launching `invoke.sh`/`invoke.bat`
and choosing option (1). Alternatively, activate the InvokeAI
environment and issue the command `invokeai`.

Once the `invoke> ` prompt appears, you can start an img2img render by
pointing to a seed file with the `-I` option as shown here:

!!! example ""

@@ -54,8 +54,7 @@ Please enter 1, 2, 3, or 4: [1] 3
```

From the command line, with the InvokeAI virtual environment active,
you can launch the front end with the command `textual_inversion
--gui`.
you can launch the front end with the command `invokeai-ti --gui`.

This will launch a text-based front end that will look like this:

@@ -227,12 +226,12 @@ It accepts a large number of arguments, which can be summarized by
passing the `--help` argument:

```sh
textual_inversion --help
invokeai-ti --help
```

Typical usage is shown here:
```sh
textual_inversion \
invokeai-ti \
  --model=stable-diffusion-1.5 \
  --resolution=512 \
  --learnable_property=style \

@@ -5,11 +5,14 @@ title: InvokeAI Web Server
# :material-web: InvokeAI Web Server

As of version 2.0.0, this distribution comes with a full-featured web server
(see screenshot). To use it, run the `invoke.py` script by adding the `--web`
option:
(see screenshot).

To use it, launch the `invoke.sh`/`invoke.bat` script and select
option (2). Alternatively, with the InvokeAI environment active, run
the `invokeai` script by adding the `--web` option:

```bash
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py --web
invokeai --web
```

You can then connect to the server by pointing your web browser at
@@ -19,17 +22,23 @@ address of the host you are running it on, or the wildcard `0.0.0.0`. For
example:

```bash
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py --web --host 0.0.0.0
invoke.sh --host 0.0.0.0
```

## Quick guided walkthrough of the WebGUI's features
or

While most of the WebGUI's features are intuitive, here is a guided walkthrough
```bash
invokeai --web --host 0.0.0.0
```

## Quick guided walkthrough of the WebUI's features

While most of the WebUI's features are intuitive, here is a guided walkthrough
through its various components.

![Invoke Web Server - Major Components](../assets/invoke-web-server-1.png){:width="640px"}

The screenshot above shows the Text to Image tab of the WebGUI. There are three
The screenshot above shows the Text to Image tab of the WebUI. There are three
main sections:

1. A **control panel** on the left, which contains various settings for text to
@@ -63,12 +72,14 @@ From top to bottom, these are:
1. Text to Image - generate images from text
2. Image to Image - from an uploaded starting image (drawing or photograph)
   generate a new one, modified by the text prompt
3. Inpainting (pending) - Interactively erase portions of a starting image and
   have the AI fill in the erased region from a text prompt.
4. Outpainting (pending) - Interactively add blank space to the borders of a
   starting image and fill in the background from a text prompt.
5. Postprocessing (pending) - Interactively postprocess generated images using a
   variety of filters.
3. Unified Canvas - Interactively combine multiple images, extend them
   with outpainting, and modify interior portions of the image with
   inpainting, erase portions of a starting image and have the AI fill in
   the erased region from a text prompt.
4. Workflow Management (not yet implemented) - this panel will allow you to create
   pipelines of common operations and combine them into workflows.
5. Training (not yet implemented) - this panel will provide an interface to [textual
   inversion training](TEXTUAL_INVERSION.md) and fine tuning.

The inpainting, outpainting and postprocessing tabs are currently in
development. However, limited versions of their features can already be accessed
@@ -76,18 +87,18 @@ through the Text to Image and Image to Image tabs.

## Walkthrough

The following walkthrough will exercise most (but not all) of the WebGUI's
The following walkthrough will exercise most (but not all) of the WebUI's
feature set.

### Text to Image

1. Launch the WebGUI using `python scripts/invoke.py --web` and connect to it
1. Launch the WebUI using `python scripts/invoke.py --web` and connect to it
   with your browser by accessing `http://localhost:9090`. If the browser and
   server are running on different machines on your LAN, add the option
   `--host 0.0.0.0` to the launch command line and connect to the machine
   hosting the web server using its IP address or domain name.

2. If all goes well, the WebGUI should come up and you'll see a green
2. If all goes well, the WebUI should come up and you'll see a green
   `connected` message on the upper right.

#### Basics
@@ -234,7 +245,7 @@ walkthrough.

2. Drag-and-drop the Lincoln-and-Parrot image into the Image panel, or click
   the blank area to get an upload dialog. The image will load into an area
   marked _Initial Image_. (The WebGUI will also load the most
   marked _Initial Image_. (The WebUI will also load the most
   recently-generated image from the gallery into a section on the left, but
   this image will be replaced in the next step.)

@@ -284,13 +295,17 @@ initial image" icons are located.

![Invoke Web Server - Use as Image Links](../assets/invoke-web-server-9.png){:width="640px"}

### Unified Canvas

See the [Unified Canvas Guide](UNIFIED_CANVAS.md)

## Parting remarks

This concludes the walkthrough, but there are several more features that you can
explore. Please check out the [Command Line Interface](CLI.md) documentation for
further explanation of the advanced features that were not covered here.

The WebGUI is under rapid development. Check back regularly for updates!
The WebUI is under rapid development. Check back regularly for updates!

## Reference

@@ -2,4 +2,62 @@
title: Overview
---

Here you can find the documentation for different features.
Here you can find the documentation for InvokeAI's various features.

## The Basics
### * The [Web User Interface](WEB.md)
Guide to the Web interface. Also see the [WebUI Hotkeys Reference Guide](WEBUIHOTKEYS.md)

### * The [Unified Canvas](UNIFIED_CANVAS.md)
Build complex scenes by combining and modifying multiple images in a stepwise
fashion. This feature combines img2img, inpainting and outpainting in
a single convenient digital artist-optimized user interface.

### * The [Command Line Interface (CLI)](CLI.md)
Scriptable access to InvokeAI's features.

## Image Generation
### * [Prompt Engineering](PROMPTS.md)
Get the images you want with the InvokeAI prompt engineering language.

### * [Post-Processing](POSTPROCESS.md)
Restore mangled faces and make images larger with upscaling. Also see the [Embiggen Upscaling Guide](EMBIGGEN.md).

### * The [Concepts Library](CONCEPTS.md)
Add custom subjects and styles using HuggingFace's repository of embeddings.

### * [Image-to-Image Guide for the CLI](IMG2IMG.md)
Use a seed image to build new creations in the CLI.

### * [Inpainting Guide for the CLI](INPAINTING.md)
Selectively erase and replace portions of an existing image in the CLI.

### * [Outpainting Guide for the CLI](OUTPAINTING.md)
Extend the borders of the image with an "outcrop" function within the CLI.

### * [Generating Variations](VARIATIONS.md)
Have an image you like and want to generate many more like it? Variations
are the ticket.

## Model Management

### * [Model Installation](../installation/050_INSTALLING_MODELS.md)
Learn how to import third-party models and switch among them. This
guide also covers optimizing models to load quickly.

### * [Merging Models](MODEL_MERGING.md)
Teach an old model new tricks. Merge 2-3 models together to create a
new model that combines characteristics of the originals.

### * [Textual Inversion](TEXTUAL_INVERSION.md)
Personalize models by adding your own style or subjects.

## Other Features

### * [The NSFW Checker](NSFW.md)
Prevent InvokeAI from displaying unwanted racy images.

### * [Miscellaneous](OTHER.md)
Run InvokeAI on Google Colab, generate images with repeating patterns,
batch process a file of prompts, increase the "creativity" of image
generation by adding initial noise, and more!

docs/index.md
@@ -81,28 +81,6 @@ Q&A</a>]

This fork is rapidly evolving. Please use the [Issues tab](https://github.com/invoke-ai/InvokeAI/issues) to report bugs and make feature requests. Be sure to use the provided templates. They will help diagnose issues faster.

## :octicons-package-dependencies-24: Installation

This fork is supported across Linux, Windows and Macintosh. Linux users can use
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
driver).

First time users, please see
[Automated Installer](installation/INSTALL_AUTOMATED.md) for a walkthrough of
getting InvokeAI up and running on your system. For alternative installation and
upgrade instructions, please see:
[InvokeAI Installation Overview](installation/)

Users who wish to make use of the **PyPatchMatch** inpainting functions
will need to perform a bit of extra work to enable this
module. Instructions can be found at [Installing
PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md).

If you have an NVIDIA card, you can benefit from the significant
memory savings and performance benefits provided by Facebook Lab's
**xFormers** module. Instructions for Linux and Windows users can be found
at [Installing xFormers](installation/070_INSTALL_XFORMERS.md).

## :fontawesome-solid-computer: Hardware Requirements

### :octicons-cpu-24: System
@@ -122,141 +100,146 @@ images in full-precision mode:
- GTX 1650 series cards
- GTX 1660 series cards

### :fontawesome-solid-memory: Memory
### :fontawesome-solid-memory: Memory and Disk

- At least 12 GB Main Memory RAM.

### :fontawesome-regular-hard-drive: Disk

- At least 18 GB of free disk space for the machine learning model, Python, and
  all its dependencies.

!!! info
## :octicons-package-dependencies-24: Installation

    Precision is auto configured based on the device. If, however, you encounter errors like
    `expected type Float but found Half` or `not implemented for Half` you can try starting
    `invoke.py` with the `--precision=float32` flag:
This fork is supported across Linux, Windows and Macintosh. Linux users can use
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
driver).

```bash
(invokeai) ~/InvokeAI$ python scripts/invoke.py --full_precision
```

### [Installation Getting Started Guide](installation)
#### [Automated Installer](installation/010_INSTALL_AUTOMATED.md)
This method is recommended for first-time users.
#### [Manual Installation](installation/020_INSTALL_MANUAL.md)
This method is recommended for experienced users and developers.
#### [Docker Installation](installation/040_INSTALL_DOCKER.md)
This method is recommended for those familiar with running Docker containers.
### Other Installation Guides
- [PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md)
- [XFormers](installation/070_INSTALL_XFORMERS.md)
- [CUDA and ROCm Drivers](installation/030_INSTALL_CUDA_AND_ROCM.md)
- [Installing New Models](installation/050_INSTALLING_MODELS.md)

## :octicons-gift-24: InvokeAI Features

- [The InvokeAI Web Interface](features/WEB.md) -
  [WebGUI hotkey reference guide](features/WEBUIHOTKEYS.md) -
  [WebGUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
<!-- separator -->
- [The Command Line Interface](features/CLI.md) -
  [Image2Image](features/IMG2IMG.md) - [Inpainting](features/INPAINTING.md) -
  [Outpainting](features/OUTPAINTING.md) -
  [Adding custom styles and subjects](features/CONCEPTS.md) -
  [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
<!-- separator -->
- [Generating Variations](features/VARIATIONS.md)
<!-- separator -->
- [Prompt Engineering](features/PROMPTS.md)
<!-- separator -->
- [Model Merging](features/MODEL_MERGING.md)
<!-- separator -->
- Miscellaneous
  - [NSFW Checker](features/NSFW.md)
### The InvokeAI Web Interface
- [WebUI overview](features/WEB.md)
- [WebUI hotkey reference guide](features/WEBUIHOTKEYS.md)
- [WebUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
<!-- separator -->
### The InvokeAI Command Line Interface
- [Command Line Interface Reference Guide](features/CLI.md)
<!-- separator -->
### Image Management
- [Image2Image](features/IMG2IMG.md)
- [Inpainting](features/INPAINTING.md)
- [Outpainting](features/OUTPAINTING.md)
- [Adding custom styles and subjects](features/CONCEPTS.md)
- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
- [Embiggen upscaling](features/EMBIGGEN.md)
- [Other](features/OTHER.md)
- [Other Features](features/OTHER.md)

<!-- separator -->
### Model Management
- [Installing](installation/050_INSTALLING_MODELS.md)
- [Model Merging](features/MODEL_MERGING.md)
- [Style/Subject Concepts and Embeddings](features/CONCEPTS.md)
- [Textual Inversion](features/TEXTUAL_INVERSION.md)
- [Not Safe for Work (NSFW) Checker](features/NSFW.md)
<!-- separator -->
### Prompt Engineering
- [Prompt Syntax](features/PROMPTS.md)
- [Generating Variations](features/VARIATIONS.md)

## :octicons-log-16: Latest Changes

### v2.2.4 <small>(11 December 2022)</small>
### v2.3.0 <small>(9 February 2023)</small>

#### the `invokeai` directory
#### Migration to Stable Diffusion `diffusers` models

Previously there were two directories to worry about, the directory that
contained the InvokeAI source code and the launcher scripts, and the `invokeai`
directory that contained the models files, embeddings, configuration and
outputs. With the 2.2.4 release, this dual system is done away with, and
everything, including the `invoke.bat` and `invoke.sh` launcher scripts, now
lives in a directory named `invokeai`. By default this directory is located in
your home directory (e.g. `\Users\yourname` on Windows), but you can select
where it goes at install time.
Previous versions of InvokeAI supported the original model file format introduced with Stable Diffusion 1.4. In the original format, known variously as "checkpoint" or "legacy" format, there is a single large weights file ending with `.ckpt` or `.safetensors`. Though this format has served the community well, it has a number of disadvantages, including file size, slow loading times, and a variety of non-standard variants that require special-case code to handle. In addition, because checkpoint files are actually a bundle of multiple machine learning sub-models, it is hard to swap different sub-models in and out, or to share common sub-models. A new format, introduced by the StabilityAI company in collaboration with HuggingFace, is called `diffusers` and consists of a directory of individual models. The most immediate benefit of `diffusers` is that they load from disk very quickly. A longer term benefit is that in the near future `diffusers` models will be able to share common sub-models, dramatically reducing disk space when you have multiple fine-tune models derived from the same base.

After installation, you can delete the install directory (the one that the zip
file creates when it unpacks). Do **not** delete or move the `invokeai`
directory!
When you perform a new install of version 2.3.0, you will be offered the option to install the `diffusers` versions of a number of popular SD models, including Stable Diffusion versions 1.5 and 2.1 (including the 768x768 pixel version of 2.1). These will act and work just like the checkpoint versions. Do not be concerned if you already have a lot of ".ckpt" or ".safetensors" models on disk! InvokeAI 2.3.0 can still load these and generate images from them without any extra intervention on your part.

##### Initialization file `invokeai/invokeai.init`
To take advantage of the optimized loading times of `diffusers` models, InvokeAI offers options to convert legacy checkpoint models into optimized `diffusers` models. If you use the `invokeai` command line interface, the relevant commands are listed below, with a brief sketch after the list:

You can place frequently-used startup options in this file, such as the default
number of steps or your preferred sampler. To keep everything in one place, this
file has now been moved into the `invokeai` directory and is named
`invokeai.init`.
* `!convert_model` -- Take the path to a local checkpoint file or a URL that is pointing to one, convert it into a `diffusers` model, and import it into InvokeAI's models registry file.
* `!optimize_model` -- If you already have a checkpoint model in your InvokeAI models file, this command will accept its short name and convert it into a like-named `diffusers` model, optionally deleting the original checkpoint file.
* `!import_model` -- Take the local path of either a checkpoint file or a `diffusers` model directory and import it into InvokeAI's registry file. You may also provide the ID of any diffusers model that has been published on the [HuggingFace models repository](https://huggingface.co/models?pipeline_tag=text-to-image&sort=downloads) and it will be downloaded and installed automatically.
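
For instance, a minimal sketch of a conversion session at the `invoke>` prompt; the checkpoint path and model name below are hypothetical:

```bash
invoke> !convert_model /home/fred/ckpt-models/my-model.ckpt
invoke> !optimize_model my-old-checkpoint
```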

#### To update from Version 2.2.3
The WebGUI offers similar functionality for model management.

The easiest route is to download and unpack one of the 2.2.4 installer files.
When it asks you for the location of the `invokeai` runtime directory, respond
with the path to the directory that contains your 2.2.3 `invokeai`. That is, if
`invokeai` lives at `C:\Users\fred\invokeai`, then answer with `C:\Users\fred`
and answer "Y" when asked if you want to reuse the directory.
For advanced users, new command-line options provide additional functionality. Launching `invokeai` with the argument `--autoconvert <path to directory>` takes the path to a directory of checkpoint files, automatically converts them into `diffusers` models and imports them. Each time the script is launched, the directory will be scanned for new checkpoint files to be loaded. Alternatively, the `--ckpt_convert` argument will cause any checkpoint or safetensors model that is already registered with InvokeAI to be converted into a `diffusers` model on the fly, allowing you to take advantage of future diffusers-only features without explicitly converting the model and saving it to disk.
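
For example, a sketch of launching with these options; the directory path is hypothetical:

```bash
invokeai --autoconvert /home/fred/ckpt-models
# or, convert already-registered checkpoint models on the fly:
invokeai --ckpt_convert
```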

The `update.sh` (`update.bat`) script that came with the 2.2.3 source installer
does not know about the new directory layout and won't be fully functional.
Please see [INSTALLING MODELS](https://invoke-ai.github.io/InvokeAI/installation/050_INSTALLING_MODELS/) for more information on model management in both the command-line and Web interfaces.

#### To update to 2.2.5 (and beyond) there's now an update path.
#### Support for the `XFormers` Memory-Efficient Crossattention Package

As they become available, you can update to more recent versions of InvokeAI
using an `update.sh` (`update.bat`) script located in the `invokeai` directory.
Running it without any arguments will install the most recent version of
InvokeAI. Alternatively, you can get specific releases by running the `update.sh`
script with an argument in the command shell. This syntax accepts the path to
the desired release's zip file, which you can find by clicking on the green
"Code" button on this repository's home page.

On CUDA (Nvidia) systems, version 2.3.0 supports the `XFormers` library. Once installed, the `xformers` package dramatically reduces the memory footprint of loaded Stable Diffusion models files and modestly increases image generation speed. `xformers` will be installed and activated automatically if you specify a CUDA system at install time.

#### Other 2.2.4 Improvements
The caveat with using `xformers` is that it introduces slightly non-deterministic behavior, and images generated using the same seed and other settings will be subtly different between invocations. Generally the changes are unnoticeable unless you rapidly shift back and forth between images, but to disable `xformers` and restore fully deterministic behavior, you may launch InvokeAI using the `--no-xformers` option. This is most conveniently done by opening the file `invokeai/invokeai.init` with a text editor, and adding the line `--no-xformers` at the bottom.

- Fix InvokeAI GUI initialization by @addianto in #1687
- fix link in documentation by @lstein in #1728
- Fix broken link by @ShawnZhong in #1736
- Remove reference to binary installer by @lstein in #1731
- documentation fixes for 2.2.3 by @lstein in #1740
- Modify installer links to point closer to the source installer by @ebr in #1745
- add documentation warning about 1650/60 cards by @lstein in #1753
- Fix Linux source URL in installation docs by @andybearman in #1756
- Make install instructions discoverable in readme by @damian0815 in #1752
- typo fix by @ofirkris in #1755
- Non-interactive model download (support HUGGINGFACE_TOKEN) by @ebr in #1578
- fix(srcinstall): shell installer - cp scripts instead of linking by @tildebyte in #1765
- stability and usage improvements to binary & source installers by @lstein in #1760
- fix off-by-one bug in cross-attention-control by @damian0815 in #1774
- Eventually update APP_VERSION to 2.2.3 by @spezialspezial in #1768
- invoke script cds to its location before running by @lstein in #1805
- Make PaperCut and VoxelArt models load again by @lstein in #1730
- Fix --embedding_directory / --embedding_path not working by @blessedcoolant in #1817
- Clean up readme by @hipsterusername in #1820
- Optimized Docker build with support for external working directory by @ebr in #1544
- disable pushing the cloud container by @mauwii in #1831
- Fix docker push github action and expand with additional metadata by @ebr in #1837
- Fix Broken Link To Notebook by @VedantMadane in #1821
- Account for flat models by @spezialspezial in #1766
- Update invoke.bat.in isolate environment variables by @lynnewu in #1833
- Arch Linux Specific PatchMatch Instructions & fixing conda install on linux by @SammCheese in #1848
- Make force free GPU memory work in img2img by @addianto in #1844
- New installer by @lstein

#### A Negative Prompt Box in the WebUI

There is now a separate text input box for negative prompts in the WebUI. This is convenient for stashing frequently-used negative prompts ("mangled limbs, bad anatomy"). The `[negative prompt]` syntax continues to work in the main prompt box as well.

To see exactly how your prompts are being parsed, launch `invokeai` with the `--log_tokenization` option. The console window will then display the tokenization process for both positive and negative prompts.
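
For example, a sketch of the bracket syntax at the CLI prompt; the prompt text is illustrative:

```bash
invoke> portrait of a knight [mangled limbs, bad anatomy] -s50
```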

#### Model Merging

Version 2.3.0 offers an intuitive user interface for merging up to three Stable Diffusion models. Model merging allows you to mix the behavior of models to achieve very interesting effects. To use this, each of the models must already be imported into InvokeAI and saved in `diffusers` format, then launch the merger using a new menu item in the InvokeAI launcher script (`invoke.sh`, `invoke.bat`) or directly from the command line with `invokeai-merge --gui`. You will be prompted to select the models to merge, the proportions in which to mix them, and the mixing algorithm. The script will create a new merged `diffusers` model and import it into InvokeAI for your use.

See [MODEL MERGING](https://invoke-ai.github.io/InvokeAI/features/MODEL_MERGING/) for more details.

#### Textual Inversion Training

Textual Inversion (TI) is a technique for training a Stable Diffusion model to emit a particular subject or style when triggered by a keyword phrase. You can perform TI training by placing a small number of images of the subject or style in a directory, and choosing a distinctive trigger phrase, such as "pointillist-style". After successful training, the subject or style will be activated by including `<pointillist-style>` in your prompt.

Previous versions of InvokeAI were able to perform TI, but it required using a command-line script with dozens of obscure command-line arguments. Version 2.3.0 features an intuitive TI frontend that will build a TI model on top of any `diffusers` model. To access training you can launch from a new item in the launcher script or from the command line using `invokeai-ti --gui`.
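
Once training completes, the trigger phrase is simply included in a prompt; this sketch uses the trigger from the example above:

```bash
invoke> a lighthouse on a rocky shore <pointillist-style> -s50
```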

See [TEXTUAL INVERSION](https://invoke-ai.github.io/InvokeAI/features/TEXTUAL_INVERSION/) for further details.

#### A New Installer Experience

The InvokeAI installer has been upgraded in order to provide a smoother and hopefully more glitch-free experience. In addition, InvokeAI is now packaged as a PyPi project, allowing developers and power-users to install InvokeAI with the command `pip install InvokeAI --use-pep517`. Please see [Installation](#installation) for details.

Developers should be aware that the `pip` installation procedure has been simplified and that the `conda` method is no longer supported at all. Accordingly, the `environments_and_requirements` directory has been deleted from the repository.

#### Command-line name changes

All of InvokeAI's functionality, including the WebUI, command-line interface, textual inversion training and model merging, can be accessed from the `invoke.sh` and `invoke.bat` launcher scripts. The menu of options has been expanded to add the new functionality. For the convenience of developers and power users, we have normalized the names of the InvokeAI command-line scripts:

* `invokeai` -- Command-line client
* `invokeai --web` -- Web GUI
* `invokeai-merge --gui` -- Model merging script with graphical front end
* `invokeai-ti --gui` -- Textual inversion script with graphical front end
* `invokeai-configure` -- Configuration tool for initializing the `invokeai` directory and selecting popular starter models.

For backward compatibility, the old command names are also recognized, including `invoke.py` and `configure-invokeai.py`. However, these are deprecated and will eventually be removed.

Developers should be aware that the locations of the scripts' source code have been moved. The new locations are:
* `invokeai` => `ldm/invoke/CLI.py`
* `invokeai-configure` => `ldm/invoke/config/configure_invokeai.py`
* `invokeai-ti` => `ldm/invoke/training/textual_inversion.py`
* `invokeai-merge` => `ldm/invoke/merge_diffusers`

Developers are strongly encouraged to perform an "editable" install of InvokeAI using `pip install -e . --use-pep517` in the Git repository, and then to call the scripts using their 2.3.0 names, rather than executing the scripts directly. Developers should also be aware that several important data files have been relocated into a new directory named `invokeai`. This includes the WebGUI's `frontend` and `backend` directories, and the `INITIAL_MODELS.yaml` files used by the installer to select starter models. Eventually all InvokeAI modules will be in subdirectories of `invokeai`.

Please see [2.3.0 Release Notes](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.3.0) for further details.
For older changelogs, please visit the
**[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.

## :material-target: Troubleshooting

Please check out our
**[:material-frequently-asked-questions: Q&A](help/TROUBLESHOOT.md)** to get
solutions for common installation problems and other issues.
Please check out our **[:material-frequently-asked-questions:
Troubleshooting
Guide](installation/010_INSTALL_AUTOMATED.md#troubleshooting)** to
get solutions for common installation problems and other issues.

## :octicons-repo-push-24: Contributing

@@ -282,8 +265,8 @@ thank them for their time, hard work and effort.
For support, please use this repository's GitHub Issues tracking service. Feel
free to send me an email if you use and like the script.

Original portions of the software are Copyright (c) 2020
[Lincoln D. Stein](https://github.com/lstein)
Original portions of the software are Copyright (c) 2022-23
by [The InvokeAI Team](https://github.com/invoke-ai).

## :octicons-book-24: Further Reading

@@ -6,81 +6,76 @@ title: Installing with the Automated Installer

## Introduction

The automated installer is a shell script that attempts to automate every step
needed to install and run InvokeAI on a stock computer running recent versions
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
version of InvokeAI with the option to upgrade to experimental versions later.
The automated installer is a Python script that automates the steps
needed to install and run InvokeAI on a stock computer running recent
versions of Linux, MacOS or Windows. It will leave you with a version
that runs a stable version of InvokeAI with the option to upgrade to
experimental versions later.

## Walk through

1. Make sure that your system meets the
   [hardware requirements](../index.md#hardware-requirements) and has the
   appropriate GPU drivers installed. In particular, if you are a Linux user
   with an AMD GPU installed, you may need to install the
   [ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
1. <a name="hardware_requirements">**Hardware Requirements**: </a>Make sure that your system meets the [hardware
   requirements](../index.md#hardware-requirements) and has the
   appropriate GPU drivers installed. For a system with an NVIDIA
   card installed, you will need to install the CUDA driver, while
   AMD-based cards require the ROCm driver. In most cases, if you've
   already used the system for gaming or other graphics-intensive
   tasks, the appropriate drivers will already be installed. If
   unsure, check the [GPU Driver Guide](030_INSTALL_CUDA_AND_ROCM.md).

    !!! info "Required Space"

        Installation requires roughly 18G of free disk space to load the libraries and
        recommended model weights files.
        Installation requires roughly 18G of free disk space to load
        the libraries and recommended model weights files.

        Regardless of your destination disk, your *system drive* (`C:\` on Windows, `/` on macOS/Linux) requires at least 6GB of free disk space to download and cache python dependencies. NOTE for Linux users: if your temporary directory is mounted as a `tmpfs`, ensure it has sufficient space.
        Regardless of your destination disk, your *system drive*
        (`C:\` on Windows, `/` on macOS/Linux) requires at least 6GB
        of free disk space to download and cache python
        dependencies.

2. Check that your system has an up-to-date Python installed. To do this, open
   up a command-line window ("Terminal" on Linux and Macintosh, "Command" or
   "Powershell" on Windows) and type `python --version`. If Python is
   installed, it will print out the version number. If it is version `3.9.1` or `3.10.x`, you meet requirements.
        NOTE for Linux users: if your temporary directory is mounted
        as a `tmpfs`, ensure it has sufficient space.

    !!! warning "At this time we do not recommend Python 3.11"
2. <a name="software_requirements">**Software Requirements**: </a>Check that your system has an up-to-date Python installed. To do
   this, open up a command-line window ("Terminal" on Linux and
   Macintosh, "Command" or "Powershell" on Windows) and type `python
   --version`. If Python is installed, it will print out the version
   number. If it is version `3.9.1` or `3.10.x`, you meet
   requirements.

    !!! warning "If you see an older version, or get a command not found error"

        Go to [Python Downloads](https://www.python.org/downloads/) and
        download the appropriate installer package for your platform. We recommend
        [Version 3.10.9](https://www.python.org/downloads/release/python-3109/),
        which has been extensively tested with InvokeAI.
    !!! warning "What to do if you have an unsupported version"

        Go to [Python Downloads](https://www.python.org/downloads/)
        and download the appropriate installer package for your
        platform. We recommend [Version
        3.10.9](https://www.python.org/downloads/release/python-3109/),
        which has been extensively tested with InvokeAI. At this time
        we do not recommend Python 3.11.

    _Please select your platform in the section below for platform-specific
    setup requirements._

=== "Windows users"
|
||||
=== "Windows"
|
||||
During the Python configuration process, look out for a
|
||||
checkbox to add Python to your PATH and select it. If the
|
||||
install script complains that it can't find python, then open
|
||||
the Python installer again and choose "Modify" existing
|
||||
installation.
|
||||
|
||||
- During the Python configuration process,
|
||||
look out for a checkbox to add Python to your PATH
|
||||
and select it. If the install script complains that it can't
|
||||
find python, then open the Python installer again and choose
|
||||
"Modify" existing installation.
|
||||
Installation requires an up to date version of the Microsoft
|
||||
Visual C libraries. Please install the 2015-2022 libraries
|
||||
available here:
|
||||
https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170
|
||||
|
||||
- Installation requires an up to date version of the Microsoft Visual C libraries. Please install the 2015-2022 libraries available here: https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170
|
||||
Please double-click on the file `WinLongPathsEnabled.reg` and
|
||||
accept the dialog box that asks you if you wish to modify your registry.
|
||||
This activates long filename support on your system and will prevent
|
||||
mysterious errors during installation.
|
||||
|
||||
=== "Mac users"
|
||||
|
||||
- After installing Python, you may need to run the
|
||||
following command from the Terminal in order to install the Web
|
||||
certificates needed to download model data from https sites. If
|
||||
you see lots of CERTIFICATE ERRORS during the last part of the
|
||||
install, this is the problem, and you can fix it with this command:
|
||||
|
||||
`/Applications/Python\ 3.10/Install\ Certificates.command`
|
||||
|
||||
- You may need to install the Xcode command line tools. These
|
||||
are a set of tools that are needed to run certain applications in a
|
||||
Terminal, including InvokeAI. This package is provided directly by Apple.
|
||||
|
||||
- To install, open a terminal window and run `xcode-select
|
||||
--install`. You will get a macOS system popup guiding you through the
|
||||
install. If you already have them installed, you will instead see some
|
||||
output in the Terminal advising you that the tools are already installed.
|
||||
|
||||
- More information can be found here:
|
||||
https://www.freecodecamp.org/news/install-xcode-command-line-tools/
|
||||
|
||||
=== "Linux users"
|
||||
|
||||
For reasons that are not entirely clear, installing the correct version of Python can be a bit of a challenge on Ubuntu, Linux Mint, Pop!_OS, and other Debian-derived distributions.
|
||||
|
||||
On Ubuntu 22.04 and higher, run the following:
|
||||
=== "Linux"
|
||||
To install an appropriate version of Python on Ubuntu 22.04
|
||||
and higher, run the following:
|
||||
|
||||
        ```
        sudo apt update
        # ...
        sudo update-alternatives --install /usr/local/bin/python python /usr/bin/python3.10 3
        ```
        Both `python` and `python3` commands are now pointing at Python3.10.
        You can still access older versions of Python by calling `python2`,
        `python3.8`, etc.

        Linux systems require a couple of additional graphics libraries to be
        installed for proper functioning of `python3-opencv`. Please run the
        following:

        `sudo apt update && sudo apt install -y libglib2.0-0 libgl1-mesa-glx`
    === "Mac"

        - After installing Python, you may need to run the following command
          from the Terminal in order to install the Web certificates needed to
          download model data from https sites. If you see lots of CERTIFICATE
          ERRORS during the last part of the install, this is the problem, and
          you can fix it with this command:

          `/Applications/Python\ 3.10/Install\ Certificates.command`

        - You may need to install the Xcode command line tools. These are a
          set of tools that are needed to run certain applications in a
          Terminal, including InvokeAI. This package is provided directly by
          Apple. To install, open a terminal window and run
          `xcode-select --install`. You will get a macOS system popup guiding
          you through the install. If you already have them installed, you
          will instead see some output in the Terminal advising you that the
          tools are already installed. More information can be found at
          [FreeCode Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)
3. **Download the Installer**: The InvokeAI installer is distributed as a ZIP
   file. Go to the
   [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest),
   and look for a file named:

    - InvokeAI-installer-v2.X.X.zip

    where "2.X.X" is the latest released version. The file is located at the
    very bottom of the release page, under **Assets**.
4. **Unpack the installer**: Unpack the zip file into a convenient directory.
   This will create a new directory named "InvokeAI-Installer". When unpacked,
   the directory will look like this:

    <figure markdown>
    ![zipfile-screenshot](../assets/installer-walkthrough/unpacked-zipfile.png)
    </figure>
5. **Launch the installer script from the desktop**: If you are using a
   desktop GUI, double-click the installer file appropriate for your
   platform. It will be named `install.bat` on Windows systems and
   `install.sh` on Linux and Macintosh systems. Be aware that your system's
   file browser may suppress the display of the file extension.

    On Windows systems, if you get an "Untrusted Publisher" warning, click on
    "More Info" and then select "Run Anyway." You trust us, right?
6. **[Alternative] Launch the installer script from the command line**:
   Alternatively, from the command line, run the shell script or .bat file:

    ```cmd
    C:\Documents\Linco> cd InvokeAI-Installer
    C:\Documents\Linco\InvokeAI-Installer> install.bat
    ```

    After successful installation, you can delete the `InvokeAI-Installer`
    directory.
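    On Linux and Macintosh systems, the equivalent from a terminal opened in
    the directory where you unpacked the installer would be:

    ```bash
    cd InvokeAI-Installer
    ./install.sh
    ```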
7. **Select the location to install InvokeAI**: The script will ask you to
   choose where to install InvokeAI. Select a directory with at least 18G of
   free space for a full install. InvokeAI and all its support files will be
   installed into a new directory named `invokeai` located at the location
   you specify.

    <figure markdown>
    ![confirm-install-directory-screenshot](../assets/installer-walkthrough/confirm-directory.png)
    </figure>

    - The default is to install the `invokeai` directory in your home
      directory, usually `C:\Users\YourName\invokeai` on Windows systems,
      `/home/YourName/invokeai` on Linux systems, and
      `/Users/YourName/invokeai` on Macintosh systems.

    - Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
      to suggest completions.
8. **Select your GPU**: The installer will autodetect your platform and will
   request you to confirm the type of GPU your graphics card has. On Linux
   systems, you will have the choice of CUDA (NVidia cards), ROCm (AMD
   cards), or CPU (no graphics acceleration). On Windows, you'll have the
   choice of CUDA vs CPU, and on Macs you'll be offered CPU only. When you
   select CPU on M1 or M2 Macintoshes, you will get MPS-based graphics
   acceleration without installing additional drivers. If you are unsure what
   GPU you are using, you can ask the installer to guess.

    <figure markdown>
    ![choose-gpu-screenshot](../assets/installer-walkthrough/choose-gpu.png)
    </figure>
9. **Watch it go!**: Sit back and let the install script work. It will
   install the third-party libraries needed by InvokeAI and the application
   itself.

    Be aware that some of the library download and install steps take a long
    time. In particular, the `pytorch` package is quite large and often
    appears to stall. If several minutes pass and nothing is happening, you
    can interrupt the script with ^C. You may restart it and it will pick up
    where it left off.
10. **Post-install Configuration**: After installation completes, the
    installer will launch the configuration script, which will guide you
    through the first-time process of selecting one or more Stable Diffusion
    model weights files, downloading and configuring them. We provide a list
    of popular models that InvokeAI performs well with. However, you can add
    more weight files later on using the command-line client or the Web UI.
    See [Installing Models](050_INSTALLING_MODELS.md) for details.

    Note that the main Stable Diffusion weights file is protected by a
    license agreement that you must agree to in order to use. The script will
    list the steps you need to take to create an account on the official site
    that hosts the weights files, accept the agreement, and provide an access
    token that allows InvokeAI to legally download and install the weights
    files.

    <figure markdown>
    ![downloading-models-screenshot](../assets/installer-walkthrough/downloading-models.png)
    </figure>

    If you have already downloaded the weights file(s) for another Stable
    Diffusion distribution, you may skip this step (by selecting "skip" when
    prompted) and configure InvokeAI to use the previously-downloaded files.
    The process for this is described in
    [Installing Models](050_INSTALLING_MODELS.md).
11. **Running InvokeAI for the first time**: The script will now exit and
    you'll be ready to generate some images. Look for the directory
    `invokeai` installed in the location you chose at the beginning of the
    install session. Look for a shell script named `invoke.sh` (Linux/Mac) or
    `invoke.bat` (Windows). Launch the script by double-clicking it or typing
    its name at the command-line:

    ```cmd
    C:\Documents\Linco\invokeAI> invoke.bat
    ```

    - The `invoke.bat` (`invoke.sh`) script will give you the choice of
      starting (1) the command-line interface, (2) the web GUI, (3) textual
      inversion training, and (4) model merging.

    - By default, the script will launch the web interface. When you do this,
      you'll see a series of startup messages ending with instructions to
      point your browser at http://localhost:9090. Click on this link to open
      up a browser and start exploring InvokeAI's features.
12. **InvokeAI Options**: You can launch InvokeAI with several different
    command-line arguments that customize its behavior. For example, you can
    change the location of the image output directory, or select your
    favorite sampler. See the [Command-Line Interface](../features/CLI.md)
    for a full list of the options.

    - You can also set the default launch options by editing the file
      `invokeai\invokeai.init`. It contains a variety of examples that you
      can follow to add and modify launch options.
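    As a sketch of what such launch options look like (the switches below are
    illustrative; the comments inside your own `invokeai.init` list the
    authoritative options):

    ```
    # illustrative invokeai.init entries
    --web
    --outdir=/home/YourName/invokeai/outputs
    ```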
!!! warning "The `invokeai` directory contains the `invokeai` application, its
|
||||
- The launcher script also offers you an option labeled "open the developer
|
||||
console". If you choose this option, you will be dropped into a
|
||||
command-line interface in which you can run python commands directly,
|
||||
access developer tools, and launch InvokeAI with customized options.
|
||||
|
||||
|
||||
!!! warning "Do not move or remove the `invokeai` directory"
|
||||
|
||||
The `invokeai` directory contains the `invokeai` application, its
|
||||
configuration files, the model weight files, and outputs of image generation.
|
||||
Once InvokeAI is installed, do not move or remove this directory."
|
||||
|
||||
|
||||
## Troubleshooting

### _Package dependency conflicts_

If you have previously installed InvokeAI or another Stable Diffusion
package, the installer may occasionally pick up outdated libraries and
either the installer or `invoke` will fail with complaints about
library conflicts. In this case, run the `invoke.sh`/`invoke.bat`
command and enter the Developer's Console by picking option (5). This
will take you to a command-line prompt.
Then give this command:

`pip install InvokeAI --force-reinstall`

This should fix the issues.

### ldm from pypi

!!! warning

    Some users have tried to correct dependency problems by installing
    the `ldm` package from PyPi.org. Unfortunately this is an unrelated
    package that has nothing to do with the 'latent diffusion model' used
    by InvokeAI. Installing ldm will make matters worse. If you've
    installed ldm, uninstall it with `pip uninstall ldm`.
### InvokeAI runs extremely slowly on Linux or Windows systems

The most frequent cause of this problem is when the installation
process installed the CPU-only version of the torch machine-learning
library, rather than a version that takes advantage of GPU
acceleration. To confirm this issue, look at the InvokeAI startup
messages. If you see a message saying ">> Using device CPU", then
this is what happened.

To fix this problem, first determine whether you have an NVidia or an
AMD GPU. The former uses the CUDA driver, and the latter uses ROCm
(only available on Linux). Then run the `invoke.sh`/`invoke.bat`
command and enter the Developer's Console by picking option (5). This
will take you to a command-line prompt.

Then type the following commands:
=== "NVIDIA System"
|
||||
```bash
|
||||
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/cu117
|
||||
pip install xformers
|
||||
```
|
||||
|
||||
=== "AMD System"
|
||||
```bash
|
||||
pip install torch torchvision --force-reinstall --extra-index-url https://download.pytorch.org/whl/rocm5.2
|
||||
```
|
||||
|
||||
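After the reinstall, you can confirm that torch now sees your GPU from the
same Developer's Console. This is a quick sanity check rather than part of
the official procedure; it should print `True` on a working CUDA (or ROCm)
system:

```bash
python -c "import torch; print(torch.cuda.is_available())"
```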
### Corrupted configuration file

If InvokeAI fails to start with complaints about its configuration, the
configuration file may have become corrupted. Try re-running the
configuration script by entering the Developer's Console and typing
`invokeai-configure`. If this does not help, you can report the problem
in the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues)
section, or visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for
interactive assistance.

### Other Problems

If you run into problems during or after installation, the InvokeAI team is
available to help you. Either create an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
ask for help on our [Discord server](https://discord.gg/ZmtBAhwWhy).
Somebody is typically available to help within 24 hours, and often much
sooner.
## Updating to newer versions

This distribution is changing rapidly, and we add new features
regularly. Releases are announced at
http://github.com/invoke-ai/InvokeAI/releases, and at
https://pypi.org/project/InvokeAI/. To update to the latest released
version (recommended), follow these steps:
1. Start the `invoke.sh`/`invoke.bat` launch script from within the
   `invokeai` root directory.
2. Choose menu item (6) "Developer's Console". This will launch a new
   command line.
3. Type the following command:

    ```bash
    pip install InvokeAI --upgrade
    ```
4. Watch the installation run. Once it is complete, you may exit the
   command line by typing `exit`, and then start InvokeAI from the
   launch script as per usual.

Alternatively, if you wish to get the most recent unreleased
development version, perform the same steps to enter the developer's
console, and then type:

```bash
pip install https://github.com/invoke-ai/InvokeAI/archive/refs/heads/main.zip
```

You can get the list of version numbers by going to the [releases
page](https://github.com/invoke-ai/InvokeAI/releases) or by browsing
the [Tags](https://github.com/invoke-ai/InvokeAI/tags) list from the
Code section of the main github page.
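To install a specific release rather than the newest one, pass its version
number to pip in the same way (the version below is just an example;
substitute one from the Tags list):

```bash
pip install "InvokeAI==2.3.0"
```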
---
title: Installing Manually
---

## Introduction
!!! tip "Conda"
    As of InvokeAI v2.3.0 installation using the `conda` package manager is no longer being supported. It will likely still work, but we are not testing this installation method.

On Windows systems, you are encouraged to install and use the
[PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
which provides compatibility with Linux and Mac shells and nice
features such as command-line completion.
### Prerequisites

Before you start, make sure you have the following prerequisites
installed. These are described in more detail in [Automated
Installation](010_INSTALL_AUTOMATED.md), and in many cases will
already be installed (if, for example, you have used your system for
gaming):

* **Python** version 3.9 or 3.10 (3.11 is not recommended).

* **CUDA Tools** For those with _NVidia GPUs_, you will need to
  install the [CUDA toolkit and optionally the XFormers library](070_INSTALL_XFORMERS.md).

* **ROCm Tools** For _Linux users with AMD GPUs_, you will need
  to install the [ROCm toolkit](./030_INSTALL_CUDA_AND_ROCM.md). Note that
  InvokeAI does not support AMD GPUs on Windows systems due to the
  lack of a Windows ROCm library.

* **Visual C++ Libraries** _Windows users_ must install the free
  [Visual C++ libraries from Microsoft](https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170)

* **The Xcode command line tools** for _Macintosh users_. Instructions are
  available at [Free Code Camp](https://www.freecodecamp.org/news/install-xcode-command-line-tools/)

* _Macintosh users_ may also need to run the `Install Certificates` command
  if model downloads give lots of certificate errors. Run:
  `/Applications/Python\ 3.10/Install\ Certificates.command`
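As a quick sanity check, each prerequisite can be probed from a terminal.
This checklist is a convenience sketch, not part of the official steps; each
command simply prints a version or status if its component is present:

```bash
python --version   # expect Python 3.9.x or 3.10.x
nvidia-smi         # NVidia GPUs: reports the driver and CUDA status
rocm-smi           # Linux + AMD GPUs: reports the ROCm driver status
```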
### Installation Walkthrough

To install InvokeAI with virtual environments and the PIP package
manager, please follow these steps:
1. Please make sure you are using Python 3.9 or 3.10. The rest of the install
   procedure depends on this and will not work with other versions:

    ```bash
    python -V
    ```
2. Create a directory to contain your InvokeAI library, configuration
   files, and models. This is known as the "runtime" or "root"
   directory, and often lives in your home directory under the name
   `invokeai`.

    Please keep in mind the disk space requirements - you will need at
    least 20GB for the models and the virtual environment. From now
    on we will refer to this directory as `INVOKEAI_ROOT`. For convenience,
    the steps below create a shell variable of that name which contains the
    path to `HOME/invokeai`.
    === "Linux/Mac"

        ```bash
        export INVOKEAI_ROOT=~/invokeai
        mkdir $INVOKEAI_ROOT
        ```
    === "Windows (Powershell)"

        ```bash
        Set-Variable -Name INVOKEAI_ROOT -Value $Home/invokeai
        mkdir $INVOKEAI_ROOT
        ```
3. Enter the root (invokeai) directory and create a virtual Python
   environment within it named `.venv`. If the command `python`
   doesn't work, try `python3`. Note that while you may create the
   virtual environment anywhere in the file system, we recommend that
   you create it within the root directory as shown here. This makes
   it possible for the InvokeAI applications to find the model data
   and configuration. If you do not choose to install the virtual
   environment inside the root directory, then you **must** set the
   `INVOKEAI_ROOT` environment variable in your shell environment, for
   example, by editing `~/.bashrc` or `~/.zshrc` files, or setting the
   Windows environment variable using the Advanced System Settings
   dialogue. Refer to your operating system documentation for details.
    === "Linux/Mac"

        ```bash
        cd $INVOKEAI_ROOT
        python -m venv .venv
        ```

    === "Windows"

        ```bash
        cd $INVOKEAI_ROOT
        python -m venv .venv
        ```
4. Activate the new environment:

    === "Linux/Mac"

        ```bash
        source .venv/bin/activate
        ```

    === "Windows"

        ```bash
        .venv\Scripts\activate
        ```

        If you get a permissions error at this point, run the command
        `Set-ExecutionPolicy -ExecutionPolicy Unrestricted -Scope CurrentUser`
        and try `activate` again.

    The command-line prompt should change to show `(.venv)` at the
    beginning of the prompt. Note that all the following steps should be
    run while inside the INVOKEAI_ROOT directory.
5. Make sure that pip is installed in your virtual environment and up to date:

    ```bash
    python -m pip install --upgrade pip
    ```
6. Install the InvokeAI Package. The `--extra-index-url` option is used to
   select among CUDA, ROCm and CPU/MPS drivers as shown below:

    === "CUDA (NVidia)"

        ```bash
        pip install InvokeAI[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
        ```

    === "ROCm (AMD)"

        ```bash
        pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
        ```

    === "CPU (Intel Macs & non-GPU systems)"

        ```bash
        pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
        ```

    === "MPS (M1 and M2 Macs)"

        ```bash
        pip install InvokeAI --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
        ```
7. Deactivate and reactivate your virtual environment so that the
   invokeai-specific commands become available in the environment:

    === "Linux/Macintosh"

        ```bash
        deactivate && source .venv/bin/activate
        ```

    === "Windows"

        ```bash
        deactivate
        .venv\Scripts\activate
        ```
8. Set up the runtime directory

    In this step you will initialize your runtime directory with the
    downloaded models, model config files, directory for textual inversion
    embeddings, and your outputs.

    ```bash
    invokeai-configure
    ```
    The script `invokeai-configure` will interactively guide you through the
    process of downloading and installing the Stable Diffusion weights files.

    If you have already downloaded the weights file(s) for another Stable
    Diffusion distribution, you may skip this step (by selecting "skip" when
    prompted) and configure InvokeAI to use the previously-downloaded files.
    The process for this is described in
    [Installing Models](050_INSTALLING_MODELS.md).
9. Run the command-line or the web interface:

    From within INVOKEAI_ROOT, activate the environment
    (with `source .venv/bin/activate` or `.venv\Scripts\activate`), and then
    run the script `invokeai`. If the virtual environment you selected is NOT
    inside INVOKEAI_ROOT, then you must specify the path to the root
    directory by adding `--root_dir \path\to\invokeai` to the commands below:
!!! example ""
|
||||
|
||||
!!! warning "Make sure that the virtual environment is activated, which should create `(invokeai)` in front of your prompt!"
|
||||
!!! warning "Make sure that the virtual environment is activated, which should create `(.venv)` in front of your prompt!"
|
||||
|
||||
=== "CLI"
|
||||
|
||||
```bash
|
||||
invokeai --root ~/invokeai
|
||||
invokeai
|
||||
```
|
||||
|
||||
=== "local Webserver"
|
||||
|
||||
```bash
|
||||
invokeai --web --root ~/invokeai
|
||||
invokeai --web
|
||||
```
|
||||
|
||||
=== "Public Webserver"
|
||||
|
||||
```bash
|
||||
invokeai --web --host 0.0.0.0 --root ~/invokeai
|
||||
invokeai --web --host 0.0.0.0
|
||||
```
|
||||
|
||||
        If you choose to run the web interface, point your browser at
        http://localhost:9090.

    !!! tip

        You can permanently set the location of the runtime directory
        by setting the environment variable `INVOKEAI_ROOT` to the
        path of the directory. As mentioned previously, this is
        **highly recommended** if your virtual environment is located
        outside of your runtime directory.
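    For example, on Linux or Mac you could add a line like the following to
    your shell startup file (shown for `~/.bashrc`; adjust the path if your
    runtime directory lives elsewhere):

    ```bash
    # let InvokeAI find its runtime directory regardless of the venv location
    export INVOKEAI_ROOT="$HOME/invokeai"
    ```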
10. Render away!

    Browse the [features](../features/CLI.md) section to learn about all the
    things you can do with InvokeAI.

    Note that some GPUs are slow to warm up. In particular, when using an AMD
    card with the ROCm driver, you may have to wait for over a minute the
    first time you try to generate an image. Fortunately, after the warm-up
    period rendering will be fast.
11. Subsequently, to relaunch the script, activate the virtual environment,
    and then launch the `invokeai` command. If you forget to activate the
    virtual environment you will most likely receive a `command not found`
    error.

    !!! warning

        Do not move the runtime directory after installation. The virtual environment will get confused if the directory is moved.
12. Other scripts

    The [Textual Inversion](../features/TEXTUAL_INVERSION.md) script can be
    launched with the command:

    ```bash
    invokeai-ti --gui
    ```

    Similarly, the [Model Merging](../features/MODEL_MERGING.md) script can
    be launched with the command:

    ```bash
    invokeai-merge --gui
    ```

    Leave off the `--gui` option to run the script using command-line
    arguments. Pass the `--help` argument to get usage instructions.
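    For instance, to see the arguments each script accepts:

    ```bash
    invokeai-ti --help
    invokeai-merge --help
    ```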
### Developer Install

If you have an interest in how InvokeAI works, or you would like to
add features or bugfixes, you are encouraged to install the source
code for InvokeAI. For this to work, you will need to install the
`git` source code management program. If it is not already installed
on your system, please see the
[Git Installation Guide](https://github.com/git-guides/install-git).
1. From the command line, run this command:

    ```bash
    git clone https://github.com/invoke-ai/InvokeAI.git
    ```

    This will create a directory named `InvokeAI` and populate it with the
    full source code from the InvokeAI repository.

2. Activate the InvokeAI virtual environment as per step (4) of the manual
   installation protocol (important!)

3. Enter the InvokeAI repository directory and run one of these
   commands, based on your GPU:
=== "CUDA (NVidia)"
|
||||
```bash
|
||||
pip install -e .[xformers] --use-pep517 --extra-index-url https://download.pytorch.org/whl/cu117
|
||||
```
|
||||
|
||||
=== "ROCm (AMD)"
|
||||
```bash
|
||||
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/rocm5.2
|
||||
```
|
||||
|
||||
=== "CPU (Intel Macs & non-GPU systems)"
|
||||
```bash
|
||||
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
```
|
||||
|
||||
=== "MPS (M1 and M2 Macs)"
|
||||
```bash
|
||||
pip install -e . --use-pep517 --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
```
|
||||
|
||||
    Be sure to pass `-e` (for an editable install) and don't forget the
    dot ("."). It is part of the command.

    You can now run `invokeai` and its related commands. The code will be
    read from the repository, so that you can edit the .py source files
    and watch the code's behavior change.

4. If you wish to contribute to the InvokeAI project, you are
   encouraged to establish a GitHub account and "fork"
   https://github.com/invoke-ai/InvokeAI into your own copy of the
   repository. You can then use GitHub functions to create and submit
   pull requests to contribute improvements to the project.

    Please see [Contributing](/index.md#Contributing) for hints
    on getting started.
---
title: NVIDIA Cuda / AMD ROCm
---

<figure markdown>

# :simple-nvidia: CUDA | :simple-amd: ROCm

</figure>
In order for InvokeAI to run at full speed, you will need a graphics
card with a supported GPU. InvokeAI supports NVidia cards via the CUDA
driver on Windows and Linux, and AMD cards via the ROCm driver on Linux.

## :simple-nvidia: CUDA

### Linux and Windows Install

If you have used your system for other graphics-intensive tasks, such
as gaming, you may very well already have the CUDA drivers
installed. To confirm, open up a command-line window and type:

```
nvidia-smi
```
If this command produces a status report on the GPU(s) installed on
your system, CUDA is installed and you have no more work to do. If
instead you get "command not found", or similar, then the driver will
need to be installed.

We strongly recommend that you install the CUDA Toolkit package
directly from NVIDIA. **Do not try to install Ubuntu's
nvidia-cuda-toolkit package. It is out of date and will cause
conflicts among the NVIDIA driver and binaries.**

Go to [CUDA Toolkit 11.7
Downloads](https://developer.nvidia.com/cuda-11-7-0-download-archive),
and use the target selection wizard to choose your operating system,
hardware platform, and preferred installation method (e.g. "local"
versus "network").

This will provide you with a downloadable install file or, depending
on your choices, a recipe for downloading and running an install shell
script. Be sure to read and follow the full installation instructions.

After an install that seems successful, you can confirm by again
running `nvidia-smi` from the command line.
### Linux Install with a Runtime Container

On Linux systems, an alternative to installing CUDA Toolkit directly on
your system is to run an NVIDIA software container that has the CUDA
libraries already in place. This is recommended if you are already
familiar with containerization technologies such as Docker.

For downloads and instructions, visit the [NVIDIA CUDA Container
Runtime Site](https://developer.nvidia.com/nvidia-container-runtime).
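If you go the container route, a one-off smoke test looks like the following
(the image tag is illustrative, and the NVIDIA container runtime must already
be configured for Docker):

```bash
docker run --rm --gpus all nvidia/cuda:11.7.1-base-ubuntu22.04 nvidia-smi
```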
### Torch Installation

When installing torch and torchvision manually with `pip`, remember to
provide the argument
`--extra-index-url https://download.pytorch.org/whl/cu117`
as described in the [Manual Installation Guide](020_INSTALL_MANUAL.md).
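For example, a manual CUDA-enabled install of the two libraries looks like
this (using the same index URL shown earlier in this guide):

```bash
pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu117
```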
## :simple-amd: ROCm

### Linux Install

AMD GPUs are only supported on Linux platforms due to the lack of a
Windows ROCm driver at the current time. Also be aware that support
for newer AMD GPUs is spotty. Your mileage may vary.

It is possible that the ROCm driver is already installed on your
machine. To test, open up a terminal window and issue the following
command:

```
rocm-smi
```
If you get a table labeled "ROCm System Management Interface" the
driver is installed and you are done. If you get "command not found,"
then the driver needs to be installed.

Go to AMD's [ROCm Downloads
Guide](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation_new.html#installation-methods)
and scroll to the _Installation Methods_ section. Find the subsection
for the install method for your preferred Linux distribution, and
issue the commands given in the recipe.

Annoyingly, the official AMD site does not have a recipe for the most
recent version of Ubuntu, 22.04. However, this [community-contributed
recipe](https://novaspirit.github.io/amdgpu-rocm-ubu22/) is reported
to work well.

After installation, please run `rocm-smi` a second time to confirm
that the driver is present and the GPU is recognized. You may need to
reboot in order to load the driver.
### Linux Install with a ROCm-docker Container

If you are comfortable with the Docker containerization system, then
you can build a ROCm docker file. The source code and installation
recipes are available
[here](https://github.com/RadeonOpenCompute/ROCm-docker/blob/master/quick-start.md).
### Torch Installation

When installing torch and torchvision manually with `pip`, remember to
provide the argument
`--extra-index-url https://download.pytorch.org/whl/rocm5.2`
as described in the [Manual Installation Guide](020_INSTALL_MANUAL.md).

This will be done automatically for you if you use the installer
script.

Be aware that the torch machine learning library does not seamlessly
interoperate with all AMD GPUs and you may experience garbled images,
black images, or long startup delays before rendering commences. Most
of these issues can be solved by Googling for workarounds. If you have
a problem and find a solution, please post an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) so that other
users benefit and we can update this document.
---
title: Installing Models
---

# :octicons-paintbrush-16: Installing Models
## Checkpoint and Diffusers Models

The model checkpoint files ('\*.ckpt') are the Stable Diffusion
"secret sauce". They are the product of training the AI on millions of
captioned images gathered from multiple sources.
Originally there was only a single Stable Diffusion weights file,
which many people named `model.ckpt`. Now there are dozens or more
that have been fine tuned to provide particular styles, genres, or
other features. In addition, there are several new formats that
improve on the original checkpoint format: a `.safetensors` format
which prevents malware from masquerading as a model, and `diffusers`
models, the most recent innovation.
InvokeAI supports all three formats but strongly prefers the
`diffusers` format. These are distributed as directories containing
multiple subfolders, each of which contains a different aspect of the
model. The advantage of this is that the models load from disk really
fast. Another advantage is that `diffusers` models are supported by a
large and active set of open source developers working at and with the
HuggingFace organization, and improvements in both rendering quality
and performance are being made at a rapid pace. Among other features
is the ability to download and install a `diffusers` model just by
providing its HuggingFace repository ID.

While InvokeAI will continue to support `.ckpt` and `.safetensors`
models for the near future, these are deprecated and support will
likely be withdrawn at some point in the not-too-distant future.

This manual will guide you through installing and configuring model
weight files and converting legacy `.ckpt` and `.safetensors` files
into performant `diffusers` models.
## Base Models

InvokeAI comes with support for a good set of starter models. You'll
find them listed in the master models file
`configs/INITIAL_MODELS.yaml` in the InvokeAI root directory. The
subset that are currently installed are found in
`configs/models.yaml`. The current list is:
| Model | HuggingFace Repo ID | Description | URL |
| ----- | ------------------- | ----------- | --- |
| stable-diffusion-1.5 | runwayml/stable-diffusion-v1-5 | Most recent version of base Stable Diffusion model | https://huggingface.co/runwayml/stable-diffusion-v1-5 |
| stable-diffusion-1.4 | runwayml/stable-diffusion-v1-4 | Previous version of base Stable Diffusion model | https://huggingface.co/runwayml/stable-diffusion-v1-4 |
| inpainting-1.5 | runwayml/stable-diffusion-inpainting | Stable diffusion 1.5 optimized for inpainting | https://huggingface.co/runwayml/stable-diffusion-inpainting |
| stable-diffusion-2.1-base | stabilityai/stable-diffusion-2-1-base | Stable Diffusion version 2.1 trained on 512 pixel images | https://huggingface.co/stabilityai/stable-diffusion-2-1-base |
| stable-diffusion-2.1-768 | stabilityai/stable-diffusion-2-1 | Stable Diffusion version 2.1 trained on 768 pixel images | https://huggingface.co/stabilityai/stable-diffusion-2-1 |
| dreamlike-diffusion-1.0 | dreamlike-art/dreamlike-diffusion-1.0 | An SD 1.5 model finetuned on high quality art | https://huggingface.co/dreamlike-art/dreamlike-diffusion-1.0 |
| dreamlike-photoreal-2.0 | dreamlike-art/dreamlike-photoreal-2.0 | A photorealistic model trained on 768 pixel images | https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0 |
| openjourney-4.0 | prompthero/openjourney | An SD 1.5 model finetuned on Midjourney images, prompt with "mdjrny-v4 style" | https://huggingface.co/prompthero/openjourney |
| nitro-diffusion-1.0 | nitrosocke/Nitro-Diffusion | An SD 1.5 model finetuned on three styles, prompt with "archer style", "arcane style" or "modern disney style" | https://huggingface.co/nitrosocke/Nitro-Diffusion |
| trinart-2.0 | naclbit/trinart_stable_diffusion_v2 | An SD 1.5 model finetuned with ~40,000 assorted high resolution manga/anime-style pictures | https://huggingface.co/naclbit/trinart_stable_diffusion_v2 |
| trinart-characters-2_0 | naclbit/trinart_derrida_characters_v2_stable_diffusion | An SD 1.5 model finetuned with 19.2M manga/anime-style pictures | https://huggingface.co/naclbit/trinart_derrida_characters_v2_stable_diffusion |
Note that these files are covered by an "Ethical AI" license which forbids
certain uses. When you initially download them, you are asked to
accept the license terms.
## Community-Contributed Models

There are too many to list here and more are being contributed every
day. [HuggingFace](https://huggingface.co/models?library=diffusers)
is a great resource for diffusers models, and is also the home of a
[fast-growing repository](https://huggingface.co/sd-concepts-library)
of embedding (".bin") models that add subjects and/or styles to your
images. The latter are automatically installed on the fly when you
include the text `<concept-name>` in your prompt. See [Concepts
Library](../features/CONCEPTS.md) for more information.
Another popular site for community-contributed models is
[CIVITAI](https://civitai.com). This extensive site currently supports
only `.safetensors` and `.ckpt` models, but they can be easily loaded
into InvokeAI and/or converted into optimized `diffusers` models. Be
aware that CIVITAI hosts many models that generate NSFW content.
## Installation

There are multiple ways to install and manage models:

1. The `invokeai-configure` script which will download and install them for you.

2. The command-line tool (CLI) has commands that allow you to import,
   configure and modify models files.

3. The web interface (WebUI) has a GUI for importing and managing
   models.
### Installation via `invokeai-configure`

From the `invoke` launcher, choose option (6) "re-run the configure
script to download new models." This will launch the same script that
prompted you to select models at install time. You can use this to add
models that you skipped the first time around. It is all right to
specify a model that was previously downloaded; the script will just
confirm that the files are complete.
### Installation via the CLI

You can install a new model, including any of the community-supported
ones, via the command-line client's `!import_model` command.

#### Installing `.ckpt` and `.safetensors` models
If the model is already downloaded to your local disk, use
`!import_model /path/to/file.ckpt` to load it. For example:

```bash
invoke> !import_model C:/Users/fred/Downloads/martians.safetensors
```

!!! tip "Forward Slashes"
    On Windows systems, use forward slashes rather than backslashes
    in your file paths. If you do use backslashes, you must double
    them like this: `C:\\Users\\fred\\Downloads\\martians.safetensors`
!!! tip "the CLI supports file path autocompletion"
|
||||
Alternatively you can directly import the file using its URL:
|
||||
|
||||
```bash
|
||||
invoke> !import_model https://example.org/sd_models/martians.safetensors
|
||||
```
|
||||
|
||||
For this to work, the URL must not be password-protected. Otherwise
|
||||
you will receive a 404 error.
|
||||
|
||||
When you import a legacy model, the CLI will ask you a few questions
|
||||
about the model, including what size image it was trained on (usually
|
||||
512x512), what name and description you wish to use for it, what
|
||||
configuration file to use for it (usually the default
|
||||
`v1-inference.yaml`), whether you'd like to make this model the
|
||||
default at startup time, and whether you would like to install a
|
||||
custom VAE (variable autoencoder) file for the model. For recent
|
||||
models, the answer to the VAE question is usually "no," but it won't
|
||||
hurt to answer "yes".
|

#### Installing `diffusers` models

You can install a `diffusers` model from the HuggingFace site using
`!import_model` and the HuggingFace repo_id for the model:

```bash
invoke> !import_model andite/anything-v4.0
```

Alternatively, you can download the model to disk and import it from
there. The model may be distributed as a ZIP file, or as a Git
repository:

```bash
invoke> !import_model C:/Users/fred/Downloads/andite--anything-v4.0
```

!!! tip "The CLI supports file path autocompletion"

    Type a bit of the path name and hit ++tab++ in order to get a choice of
    possible completions.

!!! tip "On Windows, you can drag model files onto the command-line"

    Once you have typed in `!import_model `, you can drag the model file or
    directory onto the command-line to insert the model path. This way, you
    don't need to type it or copy/paste. However, you will need to reverse
    or double backslashes as noted above.

Before installing, the CLI will ask you for a short name and
description for the model, whether to make this the default model that
is loaded at InvokeAI startup time, and whether to replace its
VAE. Generally the answer to the latter question is "no".

4. Follow the wizard's instructions to complete installation as shown in the
   example here:

   ```text
   invoke> !import_model models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
   >> Model import in process. Please enter the values needed to configure this model:

   Name for this model: arabian-nights
   Description of this model: Arabian Nights Fine Tune v1.0
   Configuration file for this model: configs/stable-diffusion/v1-inference.yaml
   Default image width: 512
   Default image height: 512
   >> New configuration:
   arabian-nights:
     config: configs/stable-diffusion/v1-inference.yaml
     description: Arabian Nights Fine Tune v1.0
     height: 512
     weights: models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
     width: 512
   OK to import [n]? y
   >> Caching model stable-diffusion-1.4 in system RAM
   >> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
   | LatentDiffusion: Running in eps-prediction mode
   | DiffusionWrapper has 859.52 M params.
   | Making attention of type 'vanilla' with 512 in_channels
   | Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
   | Making attention of type 'vanilla' with 512 in_channels
   | Using faster float16 precision
   ```

   If you've previously installed the fine-tune VAE file
   `vae-ft-mse-840000-ema-pruned.ckpt`, the wizard will also ask you if you
   want to add this VAE to the model.

The appropriate entry for this model will be added to `configs/models.yaml` and
it will be available to use in the CLI immediately.

### Converting legacy models into `diffusers`

The CLI `!convert_model` command will convert a `.safetensors` or `.ckpt`
model file into `diffusers` format and install it. This will enable the model
to load and run faster without loss of image quality.

The usage is identical to `!import_model`. You may point the command
to either a downloaded model file on disk, or to a (non-password
protected) URL:

```bash
invoke> !convert_model C:/Users/fred/Downloads/martians.safetensors
```

After a successful conversion, the CLI will offer you the option of
deleting the original `.ckpt` or `.safetensors` file.

### Optimizing a previously-installed model

Lastly, if you have previously installed a `.ckpt` or `.safetensors`
file and wish to convert it into a `diffusers` model, you can do this
without re-downloading and converting the original file using the
`!optimize_model` command. Simply pass the short name of an existing
installed model:

```bash
invoke> !optimize_model martians-v1.0
```

The model will be converted into `diffusers` format and replace the
previously installed version. You will again be offered the
opportunity to delete the original `.ckpt` or `.safetensors` file.

### Related CLI Commands

There are a whole series of additional model management commands in
the CLI that you can read about in [Command-Line
Interface](../features/CLI.md). These include:

* `!models` - List all installed models along with their load status
* `!switch <model name>` - Load the indicated model, letting you switch back and forth quickly among loaded models
* `!edit_model <model name>` - Edit the indicated model to change its name, description or other properties
* `!del_model <model name>` - Delete the indicated model
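
A quick sketch of how these commands fit together in a session (the model
name `arabian-nights` is just the example used earlier in this guide):

```bash
invoke> !models
invoke> !switch arabian-nights
invoke> !edit_model arabian-nights
invoke> !del_model arabian-nights
```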

### Manually editing `configs/models.yaml`

If you are comfortable with a text editor then you may simply edit `models.yaml`
directly.

You will need to download the desired `.ckpt/.safetensors` file and
place it somewhere on your machine's filesystem. Alternatively, for a
`diffusers` model, record the repo_id or download the whole model
directory. Then using a **text** editor (e.g. the Windows Notepad
application), open the file `configs/models.yaml`, and add a new
stanza that follows this model:

#### A legacy model

A legacy `.ckpt` or `.safetensors` entry will look like this:

```yaml
arabian-nights-1.0:
  description: A great fine-tune in Arabian Nights style
  weights: ./path/to/arabian-nights-1.0.ckpt
  config: ./configs/stable-diffusion/v1-inference.yaml
  format: ckpt
  width: 512
  height: 512
  vae: ./models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt
  default: false
```

| name               | description |
| :----------------- | :---------- |
| arabian-nights-1.0 | This is the name of the model that you will refer to from within the CLI and the WebGUI when you need to load and use the model. |
| description        | Any description that you want to add to the model to remind you what it is. |
| weights            | Relative path to the .ckpt weights file for this model. |
| config             | This is the confusingly-named configuration file for the model itself. Use `./configs/stable-diffusion/v1-inference.yaml` unless the model happens to need a custom configuration, in which case the place you downloaded it from will tell you what to use instead. For example, the runwayML custom inpainting model requires the file `configs/stable-diffusion/v1-inpainting-inference.yaml`. This is already included in the InvokeAI distribution and is configured automatically for you by the `invokeai-configure` script. |
| vae                | If you want to add a VAE file to the model, then enter its path here. |
| width, height      | This is the width and height of the images used to train the model. Currently they are always 512 and 512. |

Note that `format` is `ckpt` for both `.ckpt` and `.safetensors` files.

#### A diffusers model

A stanza for a `diffusers` model will look like this for a HuggingFace
model with a repository ID:

```yaml
arabian-nights-1.1:
  description: An even better fine-tune of the Arabian Nights
  repo_id: captahab/arabian-nights-1.1
  format: diffusers
  default: true
```

And for a downloaded directory:

```yaml
arabian-nights-1.1:
  description: An even better fine-tune of the Arabian Nights
  path: /path/to/captahab-arabian-nights-1.1
  format: diffusers
  default: true
```

There is additional syntax for indicating an external VAE to use with
this model. See `INITIAL_MODELS.yaml` and `models.yaml` for examples.
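
As a sketch of that syntax, the entries in `INITIAL_MODELS.yaml` attach an
external VAE through a nested `vae:` key (e.g. `stabilityai/sd-vae-ft-mse`),
so the hypothetical stanza above would become:

```yaml
arabian-nights-1.1:
  description: An even better fine-tune of the Arabian Nights
  repo_id: captahab/arabian-nights-1.1
  format: diffusers
  vae:
    repo_id: stabilityai/sd-vae-ft-mse
  default: true
```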

After you save the modified `models.yaml` file, relaunch
`invokeai`. The new model will now be available for your use.

### Installation via the WebUI

To access the WebUI Model Manager, click on the button that looks like
a cube in the upper right side of the browser screen. This will bring
up a dialogue that lists the models you have already installed, and
allows you to load, delete or edit them:

<figure markdown>
![model-manager](../assets/installing-models/webui-models-1.png)
</figure>

To add a new model, click on **+ Add New** and select either a
checkpoint/safetensors model, or a diffusers model:

<figure markdown>
![model-manager-add-new](../assets/installing-models/webui-models-2.png)
</figure>

In this example, we chose **Add Diffusers**. As shown in the figure
below, a new dialogue prompts you to enter the name to use for the
model, its description, and either the location of the `diffusers`
model on disk, or its Repo ID on the HuggingFace web site. If you
choose to enter a path to disk, the system will autocomplete for you
as you type:

<figure markdown>
![model-manager-add-diffusers](../assets/installing-models/webui-models-3.png)
</figure>

Press **Add Model** at the bottom of the dialogue (scrolled out of
sight in the figure), and the model will be downloaded, imported, and
registered in `models.yaml`.

The **Add Checkpoint/Safetensor Model** option is similar, except that
in this case you can choose to scan an entire folder for
checkpoint/safetensors files to import. Simply type in the path of the
directory and press the "Search" icon. This will display the
`.ckpt` and `.safetensors` files found inside the directory and its
subfolders, and allow you to choose which ones to import:

<figure markdown>
![model-manager-add-checkpoint](../assets/installing-models/webui-models-4.png)
</figure>

## Model Management Startup Options

The `invoke` launcher and the `invokeai` script accept a series of
command-line arguments that modify InvokeAI's behavior when loading
models. These can be provided on the command line, or added to the
InvokeAI root directory's `invokeai.init` initialization file.

The arguments are:

* `--model <model name>` -- Start up with the indicated model loaded
* `--ckpt_convert` -- When a checkpoint/safetensors model is loaded, convert it into a `diffusers` model in memory. This does not permanently save the converted model to disk.
* `--autoconvert <path/to/directory>` -- Scan the indicated directory path for new checkpoint/safetensors files, convert them into `diffusers` models, and import them into InvokeAI.

Here is an example of providing an argument on the command line using
the `invoke.sh` launch script:

```bash
invoke.sh --autoconvert /home/fred/stable-diffusion-checkpoints
```

And here is what the same argument looks like in `invokeai.init`:

```
--outdir="/home/fred/invokeai/outputs"
--no-nsfw_checker
--autoconvert /home/fred/stable-diffusion-checkpoints
```
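
These arguments can also be combined. As a sketch (using the flags documented
above and the `stable-diffusion-1.5` model name from `INITIAL_MODELS.yaml`),
a launch that loads a specific model and converts legacy checkpoints in
memory might look like:

```bash
invoke.sh --model stable-diffusion-1.5 --ckpt_convert
```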
@ -3,7 +3,19 @@ title: Overview

---

We offer several ways to install InvokeAI, each one suited to your
experience and preferences. We suggest that everyone start by
reviewing the
[hardware](010_INSTALL_AUTOMATED.md#hardware_requirements) and
[software](010_INSTALL_AUTOMATED.md#software_requirements)
requirements, as they are the same across each install method. Then
pick the install method most suitable to your level of experience and
needs.

See the [troubleshooting
section](010_INSTALL_AUTOMATED.md#troubleshooting) of the automated
install guide for frequently-encountered installation issues.

## Main Application

1. [Automated Installer](010_INSTALL_AUTOMATED.md)

@ -33,3 +45,10 @@ experience and preferences.

InvokeAI and its dependencies. This method is recommended for
individuals who have experience with Docker containers and understand
the pluses and minuses of a container-based install.

## Quick Guides

* [Installing CUDA and ROCm Drivers](./030_INSTALL_CUDA_AND_ROCM.md)
* [Installing XFormers](./070_INSTALL_XFORMERS.md)
* [Installing PyPatchMatch](./060_INSTALL_PATCHMATCH.md)
* [Installing New Models](./050_INSTALLING_MODELS.md)

15
installer/create_installer.sh
Normal file → Executable file
@ -56,12 +56,12 @@ rm -rf InvokeAI-Installer

# copy content
mkdir InvokeAI-Installer
for f in templates *.py *.txt *.reg; do
for f in templates lib *.txt *.reg; do
    cp -r ${f} InvokeAI-Installer/
done

# Move the wheel
mv dist/*.whl InvokeAI-Installer/
mv dist/*.whl InvokeAI-Installer/lib/

# Install scripts
# Mac/Linux
@ -75,17 +75,6 @@ cp WinLongPathsEnabled.reg InvokeAI-Installer/
# Zip everything up
zip -r InvokeAI-installer-$VERSION.zip InvokeAI-Installer

# Updater
mkdir tmp
cp templates/update.sh.in tmp/update.sh
cp templates/update.bat.in tmp/update.bat
chmod +x tmp/update.sh
chmod +x tmp/update.bat
cd tmp
zip InvokeAI-updater-$VERSION.zip update.sh update.bat
cd ..
mv tmp/InvokeAI-updater-$VERSION.zip .

# clean up
rm -rf InvokeAI-Installer tmp dist

@ -66,8 +66,7 @@ del /q .tmp1 .tmp2

@rem -------------- Install and Configure ---------------

call python main.py

call python .\lib\main.py

@rem ------------------------ Subroutines ---------------
@rem routine to do comparison of semantic version numbers

@ -27,4 +27,4 @@ if [ -z "$PYTHON" ]; then
    exit -1
fi

exec $PYTHON ./main.py ${@}
exec $PYTHON ./lib/main.py ${@}

@ -359,7 +359,7 @@ class InvokeAiInstance:
        scripts = ['invoke']

        for script in scripts:
            src = Path(__file__).parent / "templates" / f"{script}.{ext}.in"
            src = Path(__file__).parent / '..' / "templates" / f"{script}.{ext}.in"
            dest = self.runtime / f"{script}.{ext}"
            shutil.copy(src, dest)
            os.chmod(dest, 0o0755)
@ -9,10 +9,9 @@ from pathlib import Path

from prompt_toolkit import prompt
from prompt_toolkit.completion import PathCompleter
from prompt_toolkit.shortcuts import CompleteStyle
from prompt_toolkit.validation import Validator
from rich import box, print
from rich.console import Console, Group
from rich.console import Console, Group, group
from rich.panel import Panel
from rich.prompt import Confirm
from rich.style import Style
@ -37,17 +36,21 @@ else:


def welcome():

    @group()
    def text():
        if (platform_specific := _platform_specific_help()) != "":
            yield platform_specific
            yield ""
        yield Text.from_markup("Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with [i]Control-C[/] and retry.", justify="center")

    console.rule()
    print(
        Panel(
            title="[bold wheat1]Welcome to the InvokeAI Installer",
            renderable=Text(
                "Some of the installation steps take a long time to run. Please be patient. If the script appears to hang for more than 10 minutes, please interrupt with control-C and retry.",
                justify="center",
            ),
            renderable=text(),
            box=box.DOUBLE,
            width=80,
            expand=False,
            expand=True,
            padding=(1, 2),
            style=Style(bgcolor="grey23", color="orange1"),
            subtitle=f"[bold grey39]{OS}-{ARCH}",
@ -200,7 +203,7 @@ def graphical_accelerator():
        [
            f"Detected the [gold1]{OS}-{ARCH}[/] platform",
            "",
            "See [steel_blue3]https://invoke-ai.github.io/InvokeAI/#system[/] to ensure your system meets the minimum requirements.",
            "See [deep_sky_blue1]https://invoke-ai.github.io/InvokeAI/#system[/] to ensure your system meets the minimum requirements.",
            "",
            "[red3]🠶[/] [b]Your GPU drivers must be correctly installed before using InvokeAI![/] [red3]🠴[/]",
        ]
@ -294,3 +297,16 @@ def introduction() -> None:
        )
    )
    console.line(2)

def _platform_specific_help()->str:
    if OS == "Darwin":
        text = Text.from_markup("""[b wheat1]macOS Users![/]\n\nPlease be sure you have the [b wheat1]Xcode command-line tools[/] installed before continuing.\nIf not, cancel with [i]Control-C[/] and follow the Xcode install instructions at [deep_sky_blue1]https://www.freecodecamp.org/news/install-xcode-command-line-tools/[/].""")
    elif OS == "Windows":
        text = Text.from_markup("""[b wheat1]Windows Users![/]\n\nBefore you start, please do the following:
1. Double-click on the file [b wheat1]WinLongPathsEnabled.reg[/] in order to
   enable long path support on your system.
2. Make sure you have the [b wheat1]Visual C++ core libraries[/] installed. If not, install from
   [deep_sky_blue1]https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170[/]""")
    else:
        text = ""
    return text
@ -13,7 +13,8 @@ echo 3. run textual inversion training
echo 4. merge models (diffusers type only)
echo 5. re-run the configure script to download new models
echo 6. open the developer console
set /P restore="Please enter 1, 2, 3, 4 or 5: [2] "
echo 7. command-line help
set /P restore="Please enter 1, 2, 3, 4, 5, 6 or 7: [2] "
if not defined restore set restore=2
IF /I "%restore%" == "1" (
    echo Starting the InvokeAI command-line..
@ -42,6 +43,11 @@ IF /I "%restore%" == "1" (
    echo *************************
    echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
    call cmd /k
) ELSE IF /I "%restore%" == "7" (
    echo Displaying command line help...
    python .venv\Scripts\invokeai.exe --help %*
    pause
    exit /b
) ELSE (
    echo Invalid selection
    pause

@ -1,5 +1,5 @@
stable-diffusion-1.5:
  description: Stable Diffusion version 1.5 weight file (4.27 GB)
  description: Stable Diffusion version 1.5 diffusers model (4.27 GB)
  repo_id: runwayml/stable-diffusion-v1-5
  format: diffusers
  vae:
@ -7,14 +7,14 @@ stable-diffusion-1.5:
  recommended: True
  default: True
inpainting-1.5:
  description: RunwayML SD 1.5 model optimized for inpainting (4.27 GB)
  description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)
  repo_id: runwayml/stable-diffusion-inpainting
  format: diffusers
  vae:
    repo_id: stabilityai/sd-vae-ft-mse
  recommended: True
dreamlike-diffusion-1.0:
  description: An SD 1.5 model fine tuned on high quality art by dreamlike.art
  description: An SD 1.5 model fine tuned on high quality art by dreamlike.art, diffusers version (2.13 GB)
  format: diffusers
  repo_id: dreamlike-art/dreamlike-diffusion-1.0
  vae:
@ -49,9 +49,8 @@ nitro-diffusion-1.0:
  vae:
    repo_id: stabilityai/sd-vae-ft-mse
  recommended: False

trinart-2.0:
  description: An SD model finetuned with ~40,000 assorted high resolution manga/anime-style pictures (2.13 GB)
  description: An SD model finetuned with ~40,000 assorted high resolution manga/anime-style pictures, diffusers version (2.13 GB)
  repo_id: naclbit/trinart_stable_diffusion_v2
  format: diffusers
  vae:

13
invokeai/frontend/.babelrc
Normal file
@ -0,0 +1,13 @@
{
  "plugins": [
    [
      "transform-imports",
      {
        "lodash": {
          "transform": "lodash/${member}",
          "preventFullImport": true
        }
      }
    ]
  ]
}
5
invokeai/frontend/.eslintignore
Normal file
@ -0,0 +1,5 @@
dist/
.husky/
node_modules/
patches/
public/
@ -1,13 +0,0 @@
module.exports = {
  extends: [
    'eslint:recommended',
    'plugin:@typescript-eslint/recommended',
    'plugin:react-hooks/recommended',
  ],
  parser: '@typescript-eslint/parser',
  plugins: ['@typescript-eslint', 'eslint-plugin-react-hooks'],
  root: true,
  rules: {
    '@typescript-eslint/no-unused-vars': ['warn', { varsIgnorePattern: '_+' }],
  },
};
40
invokeai/frontend/.eslintrc.js
Normal file
@ -0,0 +1,40 @@
module.exports = {
  env: {
    browser: true,
    es6: true,
    node: true,
  },
  extends: [
    'eslint:recommended',
    'plugin:@typescript-eslint/recommended',
    'plugin:react/recommended',
    'plugin:react-hooks/recommended',
    'plugin:prettier/recommended',
    'plugin:react/jsx-runtime',
  ],
  parser: '@typescript-eslint/parser',
  parserOptions: {
    ecmaFeatures: {
      jsx: true,
    },
    ecmaVersion: 2018,
    sourceType: 'module',
  },
  plugins: ['react', '@typescript-eslint', 'eslint-plugin-react-hooks'],
  root: true,
  rules: {
    'react-hooks/exhaustive-deps': 'error',
    'no-var': 'error',
    'brace-style': 'error',
    'prefer-template': 'error',
    radix: 'error',
    'space-before-blocks': 'error',
    'import/prefer-default-export': 'off',
    '@typescript-eslint/no-unused-vars': ['warn', { varsIgnorePattern: '_+' }],
  },
  settings: {
    react: {
      version: 'detect',
    },
  },
};
3
invokeai/frontend/.gitignore
vendored
@ -23,3 +23,6 @@ dist-ssr
*.njsproj
*.sln
*.sw?

# build stats
stats.html
4
invokeai/frontend/.husky/pre-commit
Executable file
@ -0,0 +1,4 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"

cd invokeai/frontend/ && npx run lint
5
invokeai/frontend/.prettierignore
Normal file
@ -0,0 +1,5 @@
dist/
.husky/
node_modules/
patches/
public/
6
invokeai/frontend/.prettierrc.js
Normal file
@ -0,0 +1,6 @@
module.exports = {
  trailingComma: 'es5',
  tabWidth: 2,
  semi: true,
  singleQuote: true,
};
638
invokeai/frontend/dist/assets/index-8606d352.js
vendored
Normal file
1
invokeai/frontend/dist/assets/index-b0bf79f4.css
vendored
Normal file
625
invokeai/frontend/dist/assets/index.b7daf15c.js
vendored
13
invokeai/frontend/dist/index.html
vendored
@ -1,23 +1,16 @@
<!DOCTYPE html>
<html lang="en">
  <head>
    <script type="module" crossorigin src="./assets/polyfills.1ff60148.js"></script>

    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>InvokeAI - A Stable Diffusion Toolkit</title>
    <link rel="shortcut icon" type="icon" href="./assets/favicon.0d253ced.ico" />
    <script type="module" crossorigin src="./assets/index.b7daf15c.js"></script>
    <link rel="stylesheet" href="./assets/index.1536494e.css">
    <script type="module">try{import.meta.url;import("_").catch(()=>1);}catch(e){}window.__vite_is_modern_browser=true;</script>
    <script type="module">!function(){if(window.__vite_is_modern_browser)return;console.warn("vite: loading legacy build because dynamic import or import.meta.url is unsupported, syntax error above should be ignored");var e=document.getElementById("vite-legacy-polyfill"),n=document.createElement("script");n.src=e.src,n.onload=function(){System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))},document.body.appendChild(n)}();</script>
    <link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
    <script type="module" crossorigin src="./assets/index-8606d352.js"></script>
    <link rel="stylesheet" href="./assets/index-b0bf79f4.css">
  </head>

  <body>
    <div id="root"></div>

    <script nomodule>!function(){var e=document,t=e.createElement("script");if(!("noModule"in t)&&"onbeforeload"in t){var n=!1;e.addEventListener("beforeload",(function(e){if(e.target===t)n=!0;else if(!e.target.hasAttribute("nomodule")||!n)return;e.preventDefault()}),!0),t.type="module",t.src=".",e.head.appendChild(t),t.remove()}}();</script>
    <script nomodule crossorigin id="vite-legacy-polyfill" src="./assets/polyfills-legacy-dde3a68a.js"></script>
    <script nomodule crossorigin id="vite-legacy-entry" data-src="./assets/index-legacy-7649c4ae.js">System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))</script>
  </body>
</html>

@ -1,23 +0,0 @@
{
  "eslintConfig": {
    "extends": [
      "eslint:recommended",
      "plugin:@typescript-eslint/recommended",
      "plugin:react-hooks/recommended"
    ],
    "parser": "@typescript-eslint/parser",
    "plugins": ["@typescript-eslint", "eslint-plugin-react-hooks"],
    "root": true,
    "settings": {
      "import/resolver": {
        "node": {
          "paths": ["src"],
          "extensions": [".js", ".jsx", ".ts", ".tsx"]
        }
      }
    },
    "rules": {
      "react/jsx-filename-extension": [1, { "extensions": [".tsx", ".ts"] }]
    }
  }
}
@ -2,15 +2,15 @@
  "name": "invoke-ai-ui",
  "private": true,
  "version": "0.0.1",
  "type": "module",
  "scripts": {
    "prepare": "cd ../../ && husky install invokeai/frontend/.husky",
    "dev": "vite dev",
    "build": "tsc && vite build",
    "build-dev": "tsc && vite build -m development",
    "preview": "vite preview",
    "madge": "madge --circular src/main.tsx",
    "lint": "eslint src/",
    "prettier": "prettier *.{json,cjs,ts,html} src/**/*.{ts,tsx}",
    "lint": "eslint --fix .",
    "lint-staged": "lint-staged",
    "prettier": "prettier *.{json,js,ts,html} src/**/*.{ts,tsx,scss} --write .",
    "fmt": "npm run prettier -- --write",
    "postinstall": "patch-package"
  },
@ -25,6 +25,7 @@
    "@radix-ui/react-tooltip": "^1.0.2",
    "@reduxjs/toolkit": "^1.8.5",
    "@types/uuid": "^8.3.4",
    "@vitejs/plugin-react-swc": "^3.1.0",
    "add": "^2.0.6",
    "dateformat": "^5.0.3",
    "formik": "^2.2.9",
@ -62,22 +63,26 @@
    "@types/react-transition-group": "^4.4.5",
    "@typescript-eslint/eslint-plugin": "^5.36.2",
    "@typescript-eslint/parser": "^5.36.2",
    "@vitejs/plugin-legacy": "^3.0.1",
    "@vitejs/plugin-react": "^2.0.1",
    "babel-plugin-transform-imports": "^2.0.0",
    "eslint": "^8.23.0",
    "eslint-config-prettier": "^8.6.0",
    "eslint-plugin-prettier": "^4.2.1",
    "eslint-plugin-react": "^7.32.2",
    "eslint-plugin-react-hooks": "^4.6.0",
    "husky": "^8.0.3",
    "lint-staged": "^13.1.0",
    "madge": "^5.0.1",
    "patch-package": "^6.5.0",
    "postinstall-postinstall": "^2.1.0",
    "prettier": "^2.8.1",
    "prettier": "^2.8.3",
    "rollup-plugin-visualizer": "^5.9.0",
    "sass": "^1.55.0",
    "terser": "^5.16.1",
    "tsc-watch": "^5.0.3",
    "typescript": "^4.6.4",
    "vite": "^3.0.7",
    "typescript": "^5.0.0-beta",
    "vite": "^4.1.1",
    "vite-plugin-eslint": "^1.8.1",
    "vite-tsconfig-paths": "^3.5.2"
    "vite-tsconfig-paths": "^4.0.5"
  },
  "madge": {
    "detectiveOptions": {
@ -88,5 +93,11 @@
        "skipTypeImports": true
      }
    }
  },
  "lint-staged": {
    "**/*.{js,jsx,ts,tsx,cjs}": [
      "npx prettier --write",
      "npx eslint --fix"
    ]
  }
}

@ -1,14 +1,14 @@
import ImageUploader from 'common/components/ImageUploader';
import Console from 'features/system/components/Console';
import ProgressBar from 'features/system/components/ProgressBar';
import SiteHeader from 'features/system/components/SiteHeader';
import Console from 'features/system/components/Console';
import InvokeTabs from 'features/ui/components/InvokeTabs';
import { keepGUIAlive } from './utils';
import InvokeTabs from 'features/tabs/components/InvokeTabs';
import ImageUploader from 'common/components/ImageUploader';

import useToastWatcher from 'features/system/hooks/useToastWatcher';

import FloatingOptionsPanelButtons from 'features/tabs/components/FloatingOptionsPanelButtons';
import FloatingGalleryButton from 'features/tabs/components/FloatingGalleryButton';
import FloatingGalleryButton from 'features/ui/components/FloatingGalleryButton';
import FloatingParametersPanelButtons from 'features/ui/components/FloatingParametersPanelButtons';

keepGUIAlive();

@ -27,7 +27,7 @@ const App = () => {
        <Console />
      </div>
    </ImageUploader>
    <FloatingOptionsPanelButtons />
    <FloatingParametersPanelButtons />
    <FloatingGalleryButton />
  </div>
);

@ -16,6 +16,20 @@ export const SAMPLERS: Array<string> = [
  'k_heun',
];

// Valid Diffusers Samplers
export const DIFFUSERS_SAMPLERS: Array<string> = [
  'ddim',
  'plms',
  'k_lms',
  'dpmpp_2',
  'k_dpm_2',
  'k_dpm_2_a',
  'k_dpmpp_2',
  'k_euler',
  'k_euler_a',
  'k_heun',
];

// Valid image widths
export const WIDTHS: Array<number> = [
  64, 128, 192, 256, 320, 384, 448, 512, 576, 640, 704, 768, 832, 896, 960,

2
invokeai/frontend/src/app/invokeai.d.ts
vendored
@ -12,7 +12,7 @@
 * 'gfpgan'.
 */

import { InvokeTabName } from 'features/tabs/tabMap';
import { InvokeTabName } from 'features/ui/store/tabMap';
import { IRect } from 'konva/lib/types';

/**

@ -1,32 +1,26 @@
import { createSelector } from '@reduxjs/toolkit';
import _ from 'lodash';
import { RootState } from 'app/store';
import { activeTabNameSelector } from 'features/options/store/optionsSelectors';
import { OptionsState } from 'features/options/store/optionsSlice';
import { SystemState } from 'features/system/store/systemSlice';
import { validateSeedWeights } from 'common/util/seedWeightPairs';
import { initialCanvasImageSelector } from 'features/canvas/store/canvasSelectors';
import { generationSelector } from 'features/parameters/store/generationSelectors';
import { systemSelector } from 'features/system/store/systemSelectors';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import { isEqual } from 'lodash';

export const readinessSelector = createSelector(
  [
    (state: RootState) => state.options,
    (state: RootState) => state.system,
    generationSelector,
    systemSelector,
    initialCanvasImageSelector,
    activeTabNameSelector,
  ],
  (
    options: OptionsState,
    system: SystemState,
    initialCanvasImage,
    activeTabName
  ) => {
  (generation, system, initialCanvasImage, activeTabName) => {
    const {
      prompt,
      shouldGenerateVariations,
      seedWeights,
      initialImage,
      seed,
    } = options;
    } = generation;

    const { isProcessing, isConnected } = system;

@ -71,8 +65,8 @@ export const readinessSelector = createSelector(
  },
  {
    memoizeOptions: {
      equalityCheck: _.isEqual,
      resultEqualityCheck: _.isEqual,
      equalityCheck: isEqual,
      resultEqualityCheck: isEqual,
    },
  }
);

@ -1,7 +1,7 @@
import { createAction } from '@reduxjs/toolkit';
import { GalleryCategory } from 'features/gallery/store/gallerySlice';
import { InvokeTabName } from 'features/tabs/tabMap';
import * as InvokeAI from 'app/invokeai';
import { GalleryCategory } from 'features/gallery/store/gallerySlice';
import { InvokeTabName } from 'features/ui/store/tabMap';

/**
 * We can't use redux-toolkit's createSlice() to make these actions,

@ -1,25 +1,24 @@
import { AnyAction, Dispatch, MiddlewareAPI } from '@reduxjs/toolkit';
import dateFormat from 'dateformat';
import { Socket } from 'socket.io-client';
import * as InvokeAI from 'app/invokeai';
import type { RootState } from 'app/store';
import {
  frontendToBackendParameters,
  FrontendToBackendParametersConfig,
} from 'common/util/parameterTranslation';
import dateFormat from 'dateformat';
import {
  GalleryCategory,
  GalleryState,
  removeImage,
} from 'features/gallery/store/gallerySlice';
import { OptionsState } from 'features/options/store/optionsSlice';
import {
  addLogEntry,
  generationRequested,
  modelChangeRequested,
  setIsProcessing,
} from 'features/system/store/systemSlice';
import { InvokeTabName } from 'features/tabs/tabMap';
import * as InvokeAI from 'app/invokeai';
import type { RootState } from 'app/store';
import { InvokeTabName } from 'features/ui/store/tabMap';
import { Socket } from 'socket.io-client';

/**
 * Returns an object containing all functions which use `socketio.emit()`.
@ -39,7 +38,8 @@ const makeSocketIOEmitters = (
      const state: RootState = getState();

      const {
        options: optionsState,
        generation: generationState,
        postprocessing: postprocessingState,
        system: systemState,
        canvas: canvasState,
      } = state;
@ -47,7 +47,8 @@ const makeSocketIOEmitters = (
      const frontendToBackendParametersConfig: FrontendToBackendParametersConfig =
        {
          generationMode,
          optionsState,
          generationState,
          postprocessingState,
          canvasState,
          systemState,
        };
@ -90,8 +91,11 @@ const makeSocketIOEmitters = (
    },
    emitRunESRGAN: (imageToProcess: InvokeAI.Image) => {
      dispatch(setIsProcessing(true));
      const options: OptionsState = getState().options;
      const { upscalingLevel, upscalingStrength } = options;

      const {
        postprocessing: { upscalingLevel, upscalingStrength },
      } = getState();

      const esrganParameters = {
        upscale: [upscalingLevel, upscalingStrength],
      };
@ -111,8 +115,10 @@ const makeSocketIOEmitters = (
    },
    emitRunFacetool: (imageToProcess: InvokeAI.Image) => {
      dispatch(setIsProcessing(true));
      const options: OptionsState = getState().options;
      const { facetoolType, facetoolStrength, codeformerFidelity } = options;

      const {
        postprocessing: { facetoolType, facetoolStrength, codeformerFidelity },
      } = getState();

      const facetoolParameters: Record<string, unknown> = {
        facetool_strength: facetoolStrength,

@ -1,24 +1,24 @@
import { AnyAction, MiddlewareAPI, Dispatch } from '@reduxjs/toolkit';
import { v4 as uuidv4 } from 'uuid';
import { AnyAction, Dispatch, MiddlewareAPI } from '@reduxjs/toolkit';
import dateFormat from 'dateformat';
import i18n from 'i18n';
import { v4 as uuidv4 } from 'uuid';

import * as InvokeAI from 'app/invokeai';

import {
  addLogEntry,
  addToast,
  errorOccurred,
  processingCanceled,
  setCurrentStatus,
  setFoundModels,
  setIsCancelable,
  setIsConnected,
  setIsProcessing,
  setSystemStatus,
  setCurrentStatus,
  setSystemConfig,
  processingCanceled,
  errorOccurred,
  setModelList,
  setIsCancelable,
  addToast,
  setFoundModels,
  setSearchFolder,
  setSystemConfig,
  setSystemStatus,
} from 'features/system/store/systemSlice';

import {
@ -30,20 +30,20 @@ import {
  setIntermediateImage,
} from 'features/gallery/store/gallerySlice';

import type { RootState } from 'app/store';
import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
import {
  clearInitialImage,
  setInfillMethod,
  setInitialImage,
  setMaskPath,
} from 'features/options/store/optionsSlice';
} from 'features/parameters/store/generationSlice';
import { tabMap } from 'features/ui/store/tabMap';
import {
  requestImages,
  requestNewImages,
  requestSystemConfig,
} from './actions';
import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
import { tabMap } from 'features/tabs/tabMap';
import type { RootState } from 'app/store';

/**
 * Returns an object containing listener callbacks for socketio events.
@ -104,8 +104,9 @@ const makeSocketIOListeners = (
     */
    onGenerationResult: (data: InvokeAI.ImageResultResponse) => {
      try {
        const state: RootState = getState();
        const { shouldLoopback, activeTab } = state.options;
        const state = getState();
        const { activeTab } = state.ui;
        const { shouldLoopback } = state.postprocessing;
        const { boundingBox: _, generationMode, ...rest } = data;

        const newImage = {
@ -327,7 +328,9 @@ const makeSocketIOListeners = (
        dispatch(removeImage(data));

        // remove references to image in options
        const { initialImage, maskPath } = getState().options;
        const {
          generation: { initialImage, maskPath },
        } = getState();

        if (
          initialImage === url ||

@ -1,8 +1,8 @@
import { Middleware } from '@reduxjs/toolkit';
import { io } from 'socket.io-client';

import makeSocketIOListeners from './listeners';
import makeSocketIOEmitters from './emitters';
import makeSocketIOListeners from './listeners';

import * as InvokeAI from 'app/invokeai';

@ -26,7 +26,7 @@ export const socketioMiddleware = () => {

  const socketio = io(origin, {
    timeout: 60000,
    path: window.location.pathname + 'socket.io',
    path: `${window.location.pathname}socket.io`,
  });

  let areListenersSet = false;

@ -5,10 +5,13 @@ import storage from 'redux-persist/lib/storage'; // defaults to localStorage for

import { getPersistConfig } from 'redux-deep-persist';

import optionsReducer from 'features/options/store/optionsSlice';
import galleryReducer from 'features/gallery/store/gallerySlice';
import systemReducer from 'features/system/store/systemSlice';
import canvasReducer from 'features/canvas/store/canvasSlice';
import galleryReducer from 'features/gallery/store/gallerySlice';
import lightboxReducer from 'features/lightbox/store/lightboxSlice';
import generationReducer from 'features/parameters/store/generationSlice';
import postprocessingReducer from 'features/parameters/store/postprocessingSlice';
import systemReducer from 'features/system/store/systemSlice';
import uiReducer from 'features/ui/store/uiSlice';

import { socketioMiddleware } from './socketio/middleware';

@ -58,10 +61,13 @@ const galleryBlacklist = [
].map((blacklistItem) => `gallery.${blacklistItem}`);

const rootReducer = combineReducers({
  options: optionsReducer,
  generation: generationReducer,
  postprocessing: postprocessingReducer,
  gallery: galleryReducer,
  system: systemReducer,
  canvas: canvasReducer,
  ui: uiReducer,
  lightbox: lightboxReducer,
});

const rootPersistConfig = getPersistConfig({
@ -89,8 +95,8 @@ export const store = configureStore({
        'canvas/setStageCoordinates',
        'canvas/setStageScale',
        'canvas/setIsDrawing',
        // 'canvas/setBoundingBoxCoordinates',
        // 'canvas/setBoundingBoxDimensions',
        'canvas/setBoundingBoxCoordinates',
        'canvas/setBoundingBoxDimensions',
        'canvas/setIsDrawing',
        'canvas/addPointToCurrentLine',
      ],

@ -1,7 +1,7 @@
import { Box, forwardRef, Icon } from '@chakra-ui/react';
import { Feature } from 'app/features';
import { IconType } from 'react-icons';
import { MdHelp } from 'react-icons/md';
import { Feature } from 'app/features';
import GuidePopover from './GuidePopover';

type GuideIconProps = {

@ -1,29 +1,29 @@
import {
  Box,
  Popover,
  PopoverArrow,
  PopoverContent,
  PopoverTrigger,
  Box,
} from '@chakra-ui/react';
import { SystemState } from 'features/system/store/systemSlice';
import { useAppSelector } from 'app/storeHooks';
import { RootState } from 'app/store';
import { createSelector } from '@reduxjs/toolkit';
import { ReactElement } from 'react';
import { Feature, useFeatureHelpInfo } from 'app/features';
import { useAppSelector } from 'app/storeHooks';
import { systemSelector } from 'features/system/store/systemSelectors';
import { SystemState } from 'features/system/store/systemSlice';
import { ReactElement } from 'react';

type GuideProps = {
  children: ReactElement;
  feature: Feature;
};

const systemSelector = createSelector(
  (state: RootState) => state.system,
const guidePopoverSelector = createSelector(
  systemSelector,
  (system: SystemState) => system.shouldDisplayGuides
);

const GuidePopover = ({ children, feature }: GuideProps) => {
  const shouldDisplayGuides = useAppSelector(systemSelector);
  const shouldDisplayGuides = useAppSelector(guidePopoverSelector);
  const { text } = useFeatureHelpInfo(feature);

  if (!shouldDisplayGuides) return null;

@ -1,9 +1,9 @@
import {
  IconButtonProps,
  forwardRef,
  IconButton,
  IconButtonProps,
  Tooltip,
  TooltipProps,
  forwardRef,
} from '@chakra-ui/react';

export type IAIIconButtonProps = IconButtonProps & {

@ -1,19 +1,20 @@
import {
  FormControl,
  FormControlProps,
  FormLabel,
  FormLabelProps,
  NumberDecrementStepper,
  NumberIncrementStepper,
  NumberInput,
  NumberInputField,
  NumberIncrementStepper,
  NumberDecrementStepper,
  NumberInputProps,
  FormLabel,
  NumberInputFieldProps,
  NumberInputProps,
  NumberInputStepperProps,
  FormControlProps,
  FormLabelProps,
  TooltipProps,
  Tooltip,
  TooltipProps,
} from '@chakra-ui/react';
import _ from 'lodash';
import { clamp } from 'lodash';

import { FocusEvent, useEffect, useState } from 'react';

const numberStringRegex = /^-?(0\.)?\.?$/;
@ -104,7 +105,7 @@ const IAINumberInput = (props: Props) => {
   * clamp it on blur and floor it if needed.
   */
  const handleBlur = (e: FocusEvent<HTMLInputElement>) => {
    const clamped = _.clamp(
    const clamped = clamp(
      isInteger ? Math.floor(Number(e.target.value)) : Number(e.target.value),
      min,
      max

@ -1,11 +1,11 @@
import {
  BoxProps,
  Popover,
  PopoverArrow,
  PopoverContent,
  PopoverProps,
  PopoverTrigger,
  BoxProps,
} from '@chakra-ui/react';
import { PopoverProps } from '@chakra-ui/react';
import { ReactNode } from 'react';

type IAIPopoverProps = PopoverProps & {

@ -23,10 +23,11 @@ import {
  Tooltip,
  TooltipProps,
} from '@chakra-ui/react';
import React, { FocusEvent, useMemo, useState, useEffect } from 'react';
import { clamp } from 'lodash';

import { FocusEvent, useEffect, useMemo, useState } from 'react';
import { BiReset } from 'react-icons/bi';
import IAIIconButton, { IAIIconButtonProps } from './IAIIconButton';
import _ from 'lodash';

export type IAIFullSliderProps = {
  label: string;
@ -122,7 +123,7 @@ export default function IAISlider(props: IAIFullSliderProps) {

  const handleInputBlur = (e: FocusEvent<HTMLInputElement>) => {
    if (e.target.value === '') e.target.value = String(min);
    const clamped = _.clamp(
    const clamped = clamp(
      isInteger ? Math.floor(Number(e.target.value)) : Number(localInputValue),
      min,
      numberInputMax

@ -1,20 +1,20 @@
import {
  useCallback,
  ReactNode,
  useState,
  useEffect,
  KeyboardEvent,
} from 'react';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import { FileRejection, useDropzone } from 'react-dropzone';
import { useToast } from '@chakra-ui/react';
import { ImageUploaderTriggerContext } from 'app/contexts/ImageUploaderTriggerContext';
import { activeTabNameSelector } from 'features/options/store/optionsSelectors';
import { tabDict } from 'features/tabs/components/InvokeTabs';
import ImageUploadOverlay from './ImageUploadOverlay';
import { uploadImage } from 'features/gallery/store/thunks/uploadImage';
import { useAppDispatch, useAppSelector } from 'app/storeHooks';
import useImageUploader from 'common/hooks/useImageUploader';
import { uploadImage } from 'features/gallery/store/thunks/uploadImage';
import { tabDict } from 'features/ui/components/InvokeTabs';
import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
import {
  KeyboardEvent,
  ReactNode,
  useCallback,
  useEffect,
  useState,
} from 'react';
import { FileRejection, useDropzone } from 'react-dropzone';
import { useTranslation } from 'react-i18next';
import ImageUploadOverlay from './ImageUploadOverlay';

type ImageUploaderProps = {
  children: ReactNode;
@ -33,7 +33,7 @@ const ImageUploader = (props: ImageUploaderProps) => {
    (rejection: FileRejection) => {
      setIsHandlingUpload(true);
      const msg = rejection.errors.reduce(
        (acc: string, cur: { message: string }) => acc + '\n' + cur.message,
        (acc: string, cur: { message: string }) => `${acc}\n${cur.message}`,
        ''
      );
      toast({

@ -1,7 +1,7 @@
import { Heading } from '@chakra-ui/react';
import { ImageUploaderTriggerContext } from 'app/contexts/ImageUploaderTriggerContext';
import { useContext } from 'react';
import { FaUpload } from 'react-icons/fa';
import { ImageUploaderTriggerContext } from 'app/contexts/ImageUploaderTriggerContext';

type ImageUploaderButtonProps = {
  styleClass?: string;

@ -1,6 +1,6 @@
import { ImageUploaderTriggerContext } from 'app/contexts/ImageUploaderTriggerContext';
import { useContext } from 'react';
import { FaUpload } from 'react-icons/fa';
import { ImageUploaderTriggerContext } from 'app/contexts/ImageUploaderTriggerContext';
import IAIIconButton from './IAIIconButton';

const ImageUploaderIconButton = () => {
