Merge branch 'development' into fix-high-step-count

commit ddb007af65 by Lincoln Stein, 2022-10-21 06:55:17 -04:00 (committed via GitHub)
205 changed files with 8144 additions and 2888 deletions

.github/CODEOWNERS (new file, vendored, 4 lines)

@ -0,0 +1,4 @@
ldm/invoke/pngwriter.py @CapableWeb
ldm/invoke/server_legacy.py @CapableWeb
scripts/legacy_api.py @CapableWeb
tests/legacy_tests.sh @CapableWeb

View File

@ -1,42 +0,0 @@
# This workflow:
# - Sets up Node.js with a yarn cache
# - Installs packages via yarn, using yarn.lock
# - Builds the frontend bundle
# - Commits the frontend bundle
name: Frontend CI/CD [dev]
on:
push:
branches: [ "development" ]
paths: 'frontend/**'
jobs:
build:
runs-on: ubuntu-latest
defaults:
run:
working-directory: 'frontend'
steps:
- uses: actions/checkout@v3
- name: Use Node.js 18
uses: actions/setup-node@v3
with:
node-version: 18
cache: 'yarn'
cache-dependency-path: '**/yarn.lock'
- name: install packages
run: yarn install --immutable --immutable-cache --check-cache
- name: build frontend bundle [dev]
run: yarn build-dev
- name: setup git config
run: |
git config user.name "GitHub Actions Bot"
git config user.email "<>"
- name: commit bundle [dev]
run: |
git add dist/* -f
git commit -m "[bot] builds dev bundle"
git push origin development

View File

@ -1,42 +0,0 @@
# This workflow:
# - Sets up Node.js with a yarn cache
# - Installs packages via yarn, using yarn.lock
# - Builds the frontend bundle
# - Commits the frontend bundle
name: Frontend CI/CD [production]
on:
push:
branches: [ "main" ]
paths: 'frontend/**'
jobs:
build:
runs-on: ubuntu-latest
defaults:
run:
working-directory: 'frontend'
steps:
- uses: actions/checkout@v3
- name: Use Node.js 18
uses: actions/setup-node@v3
with:
node-version: 18
cache: 'yarn'
cache-dependency-path: '**/yarn.lock'
- name: install packages
run: yarn install --immutable --immutable-cache --check-cache
- name: build frontend bundle [production]
run: yarn build
- name: setup git config
run: |
git config user.name "GitHub Actions Bot"
git config user.email "<>"
- name: commit bundle [production]
run: |
git add dist/* -f
git commit -m "[bot] builds production bundle"
git push origin main

View File

@ -3,9 +3,9 @@ on:
push:
branches:
- main
pull_request:
branches:
- main
# pull_request:
# branches:
# - main
jobs:
build:
name: Deploy docs to GitHub Pages

View File

@ -1,4 +1,4 @@
name: Test Dream with Conda
name: Test Invoke with Conda
on:
push:
branches:
@ -9,7 +9,7 @@ jobs:
strategy:
matrix:
os: [ ubuntu-latest, macos-12 ]
name: Test dream.py on ${{ matrix.os }} with conda
name: Test invoke.py on ${{ matrix.os }} with conda
runs-on: ${{ matrix.os }}
steps:
- run: |
@ -85,9 +85,9 @@ jobs:
fi
# Utterly hacky, but I don't know how else to do this
if [[ ${{ github.ref }} == 'refs/heads/master' ]]; then
time ${{ steps.vars.outputs.PYTHON_BIN }} scripts/dream.py --from_file tests/preflight_prompts.txt
time ${{ steps.vars.outputs.PYTHON_BIN }} scripts/invoke.py --from_file tests/preflight_prompts.txt
elif [[ ${{ github.ref }} == 'refs/heads/development' ]]; then
time ${{ steps.vars.outputs.PYTHON_BIN }} scripts/dream.py --from_file tests/dev_prompts.txt
time ${{ steps.vars.outputs.PYTHON_BIN }} scripts/invoke.py --from_file tests/dev_prompts.txt
fi
mkdir -p outputs/img-samples
- name: Archive results

.gitignore (vendored, 5 lines changed)

@ -1,7 +1,7 @@
# ignore default image save location and model symbolic link
outputs/
models/ldm/stable-diffusion-v1/model.ckpt
ldm/dream/restoration/codeformer/weights
ldm/invoke/restoration/codeformer/weights
# ignore the Anaconda/Miniconda installer used while building Docker image
anaconda.sh
@ -196,3 +196,6 @@ checkpoints
.vscode/
gfpgan/
models/ldm/stable-diffusion-v1/model.sha256
# GFPGAN model files
gfpgan/

View File

@ -1,6 +1,6 @@
MIT License
Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)
Copyright (c) 2022 Lincoln Stein and InvokeAI Organization
This software is derived from a fork of the source code available from
https://github.com/pesser/stable-diffusion and

README.md (103 lines changed)

@ -2,14 +2,7 @@
# InvokeAI: A Stable Diffusion Toolkit
_Note: This fork is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to
report bugs and make feature requests. Be sure to use the provided
templates. They will help us diagnose issues faster._
_This repository was formerly known as lstein/stable-diffusion_
# **Table of Contents**
_Formerly known as lstein/stable-diffusion_
![project logo](docs/assets/logo.png)
@ -24,7 +17,7 @@ _This repository was formally known as lstein/stable-diffusion_
[CI checks on dev badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/development?label=CI%20status%20on%20dev&cache=900&icon=github
[CI checks on dev link]: https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-dream-conda.yml
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
[discord link]: https://discord.gg/ZmtBAhwWhy
[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
@ -41,10 +34,18 @@ _This repository was formally known as lstein/stable-diffusion_
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
</div>
This is a fork of [CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion), the open
source text-to-image generator. It provides a streamlined process with various new features and
options to aid the image generation process. It runs on Windows, Mac and Linux machines, and runs on
GPU cards with as little as 4 GB of RAM.
This is a fork of
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion),
the open source text-to-image generator. It provides a streamlined
process with various new features and options to aid the image
generation process. It runs on Windows, Mac and Linux machines, with
GPU cards with as little as 4 GB of RAM. It provides both a polished
Web interface (see below), and an easy-to-use command-line interface.
**Quick links**: [<a href="https://discord.gg/NwVCmKwY">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
<div align="center"><img src="docs/assets/invoke-web-server-1.png" width=640></div>
_Note: This fork is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
@ -88,22 +89,28 @@ You wil need one of the following:
#### Disk
- At least 6 GB of free disk space for the machine learning model, Python, and all its dependencies.
- At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.
#### Note
**Note**
If you have a Nvidia 10xx series card (e.g. the 1080ti), please
run the dream script in full-precision mode as shown below.
Similarly, specify full-precision mode on Apple M1 hardware.
Precision is auto configured based on the device. If however you encounter
errors like 'expected type Float but found Half' or 'not implemented for Half'
you can try starting `dream.py` with the `--precision=float32` flag:
you can try starting `invoke.py` with the `--precision=float32` flag:
```bash
(ldm) ~/stable-diffusion$ python scripts/dream.py --precision=float32
(ldm) ~/stable-diffusion$ python scripts/invoke.py --precision=float32
```
### Features
#### Major Features
- [Web Server](docs/features/WEB.md)
- [Interactive Command Line Interface](docs/features/CLI.md)
- [Image To Image](docs/features/IMG2IMG.md)
- [Inpainting Support](docs/features/INPAINTING.md)
@ -111,10 +118,9 @@ you can try starting `dream.py` with the `--precision=float32` flag:
- [Upscaling, face-restoration and outpainting](docs/features/POSTPROCESS.md)
- [Seamless Tiling](docs/features/OTHER.md#seamless-tiling)
- [Google Colab](docs/features/OTHER.md#google-colab)
- [Web Server](docs/features/WEB.md)
- [Reading Prompts From File](docs/features/PROMPTS.md#reading-prompts-from-a-file)
- [Shortcut: Reusing Seeds](docs/features/OTHER.md#shortcuts-reusing-seeds)
- [Weighted Prompts](docs/features/PROMPTS.md#weighted-prompts)
- [Prompt Blending](docs/features/PROMPTS.md#prompt-blending)
- [Thresholding and Perlin Noise Initialization Options](/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options)
- [Negative/Unconditioned Prompts](docs/features/PROMPTS.md#negative-and-unconditioned-prompts)
- [Variations](docs/features/VARIATIONS.md)
@ -128,39 +134,38 @@ you can try starting `dream.py` with the `--precision=float32` flag:
### Latest Changes
- vNEXT (TODO 2022)
- v2.0.1 (13 October 2022)
- fix noisy images at high step count when using k* samplers
- dream.py script now calls invoke.py module directly rather than
via a new python process (which could break the environment)
- Deprecated `--full_precision` / `-F`. Simply omit it and `dream.py` will auto
- v2.0.0 (9 October 2022)
- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
for backward compatibility.
- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
- Support for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/INPAINTING.md">inpainting</a> and <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OUTPAINTING.md">outpainting</a>
- img2img runs on all k* samplers
- Support for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/PROMPTS.md#negative-and-unconditioned-prompts">negative prompts</a>
- Support for CodeFormer face reconstruction
- Support for Textual Inversion on Macintoshes
- Support in both WebGUI and CLI for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/POSTPROCESS.md">post-processing of previously-generated images</a>
using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
and "embiggen" upscaling. See the `!fix` command.
- New `--hires` option on `invoke>` line allows <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/CLI.md#this-is-an-example-of-txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
- New `--perlin` and `--threshold` options allow you to add and control variation
during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>
- Extensive metadata now written into PNG files, allowing reliable regeneration of images
and tweaking of previous settings.
- Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
- Improved <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/CLI.md">command-line completion behavior</a>.
New commands added:
* List command-line history with `!history`
* Search command-line history with `!search`
* Clear history with `!clear`
- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
configure. To switch away from auto use the new flag like `--precision=float32`.
- v1.14 (11 September 2022)
- Memory optimizations for small-RAM cards. 512x512 now possible on 4 GB GPUs.
- Full support for Apple hardware with M1 or M2 chips.
- Add "seamless mode" for circular tiling of image. Generates beautiful effects.
([prixt](https://github.com/prixt)).
- Inpainting support.
- Improved web server GUI.
- Lots of code and documentation cleanups.
- v1.13 (3 September 2022)
- Support image variations (see [VARIATIONS](docs/features/VARIATIONS.md))
([Kevin Gibbons](https://github.com/bakkot) and many contributors and reviewers)
- Supports a Google Colab notebook for a standalone server running on Google hardware
[Arturo Mendivil](https://github.com/artmen1516)
- WebUI supports GFPGAN/ESRGAN facial reconstruction and upscaling
[Kevin Gibbons](https://github.com/bakkot)
- WebUI supports incremental display of in-progress images during generation
[Kevin Gibbons](https://github.com/bakkot)
- A new configuration file scheme that allows new models (including upcoming
stable-diffusion-v1.5) to be added without altering the code.
([David Wager](https://github.com/maddavid12))
- Can specify --grid on dream.py command line as the default.
- Miscellaneous internal bug and stability fixes.
- Works on M1 Apple hardware.
- Multiple bug fixes.
For older changelogs, please visit the **[CHANGELOG](docs/features/CHANGELOG.md)**.
### Troubleshooting

View File

@ -12,9 +12,9 @@ from PIL import Image
from uuid import uuid4
from threading import Event
from ldm.dream.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
from ldm.dream.pngwriter import PngWriter, retrieve_metadata
from ldm.dream.conditioning import split_weighted_subprompts
from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
from ldm.invoke.conditioning import split_weighted_subprompts
from backend.modules.parameters import parameters_to_command
@ -49,24 +49,16 @@ class InvokeAIWebServer:
engineio_logger = True if args.web_verbose else False
max_http_buffer_size = 10000000
# CORS Allowed Setup
cors_allowed_origins = [
'http://127.0.0.1:5173',
'http://localhost:5173',
]
additional_allowed_origins = (
opt.cors if opt.cors else []
) # additional CORS allowed origins
if self.host == '127.0.0.1':
cors_allowed_origins.extend(
[
f'http://{self.host}:{self.port}',
f'http://localhost:{self.port}',
]
)
cors_allowed_origins = (
cors_allowed_origins + additional_allowed_origins
)
socketio_args = {
'logger': logger,
'engineio_logger': engineio_logger,
'max_http_buffer_size': max_http_buffer_size,
'ping_interval': (50, 50),
'ping_timeout': 60,
}
if opt.cors:
socketio_args['cors_allowed_origins'] = opt.cors
self.app = Flask(
__name__, static_url_path='', static_folder='../frontend/dist/'
@ -74,12 +66,7 @@ class InvokeAIWebServer:
self.socketio = SocketIO(
self.app,
logger=logger,
engineio_logger=engineio_logger,
max_http_buffer_size=max_http_buffer_size,
cors_allowed_origins=cors_allowed_origins,
ping_interval=(50, 50),
ping_timeout=60,
**socketio_args
)
# Keep Server Alive Route
@ -160,7 +147,7 @@ class InvokeAIWebServer:
self.init_image_path = os.path.join(self.result_path, 'init-images/')
self.mask_image_path = os.path.join(self.result_path, 'mask-images/')
# txt log
self.log_path = os.path.join(self.result_path, 'dream_log.txt')
self.log_path = os.path.join(self.result_path, 'invoke_log.txt')
# make all output paths
[
os.makedirs(path, exist_ok=True)
@ -270,14 +257,14 @@ class InvokeAIWebServer:
@socketio.on('generateImage')
def handle_generate_image_event(
generation_parameters, esrgan_parameters, gfpgan_parameters
generation_parameters, esrgan_parameters, facetool_parameters
):
try:
print(
f'>> Image generation requested: {generation_parameters}\nESRGAN parameters: {esrgan_parameters}\nGFPGAN parameters: {gfpgan_parameters}'
f'>> Image generation requested: {generation_parameters}\nESRGAN parameters: {esrgan_parameters}\nFacetool parameters: {facetool_parameters}'
)
self.generate_images(
generation_parameters, esrgan_parameters, gfpgan_parameters
generation_parameters, esrgan_parameters, facetool_parameters
)
except Exception as e:
self.socketio.emit('error', {'message': (str(e))})
@ -313,9 +300,11 @@ class InvokeAIWebServer:
)
if postprocessing_parameters['type'] == 'esrgan':
progress.set_current_status('Upscaling')
progress.set_current_status('Upscaling (ESRGAN)')
elif postprocessing_parameters['type'] == 'gfpgan':
progress.set_current_status('Restoring Faces')
progress.set_current_status('Restoring Faces (GFPGAN)')
elif postprocessing_parameters['type'] == 'codeformer':
progress.set_current_status('Restoring Faces (Codeformer)')
socketio.emit('progressUpdate', progress.to_formatted_dict())
eventlet.sleep(0)
@ -332,9 +321,17 @@ class InvokeAIWebServer:
elif postprocessing_parameters['type'] == 'gfpgan':
image = self.gfpgan.process(
image=image,
strength=postprocessing_parameters['gfpgan_strength'],
strength=postprocessing_parameters['facetool_strength'],
seed=seed,
)
elif postprocessing_parameters['type'] == 'codeformer':
image = self.codeformer.process(
image=image,
strength=postprocessing_parameters['facetool_strength'],
fidelity=postprocessing_parameters['codeformer_fidelity'],
seed=seed,
device='cpu' if str(self.generate.device) == 'mps' else self.generate.device
)
else:
raise TypeError(
f'{postprocessing_parameters["type"]} is not a valid postprocessing type'
@ -461,7 +458,7 @@ class InvokeAIWebServer:
}
def generate_images(
self, generation_parameters, esrgan_parameters, gfpgan_parameters
self, generation_parameters, esrgan_parameters, facetool_parameters
):
try:
self.canceled.clear()
@ -564,7 +561,7 @@ class InvokeAIWebServer:
nonlocal generation_parameters
nonlocal esrgan_parameters
nonlocal gfpgan_parameters
nonlocal facetool_parameters
nonlocal progress
step_index = 1
@ -624,23 +621,41 @@ class InvokeAIWebServer:
if self.canceled.is_set():
raise CanceledException
if gfpgan_parameters:
progress.set_current_status('Restoring Faces')
if facetool_parameters:
if facetool_parameters['type'] == 'gfpgan':
progress.set_current_status('Restoring Faces (GFPGAN)')
elif facetool_parameters['type'] == 'codeformer':
progress.set_current_status('Restoring Faces (Codeformer)')
progress.set_current_status_has_steps(False)
self.socketio.emit(
'progressUpdate', progress.to_formatted_dict()
)
eventlet.sleep(0)
image = self.gfpgan.process(
image=image,
strength=gfpgan_parameters['strength'],
seed=seed,
)
if facetool_parameters['type'] == 'gfpgan':
image = self.gfpgan.process(
image=image,
strength=facetool_parameters['strength'],
seed=seed,
)
elif facetool_parameters['type'] == 'codeformer':
image = self.codeformer.process(
image=image,
strength=facetool_parameters['strength'],
fidelity=facetool_parameters['codeformer_fidelity'],
seed=seed,
device='cpu' if str(self.generate.device) == 'mps' else self.generate.device,
)
all_parameters['codeformer_fidelity'] = facetool_parameters['codeformer_fidelity']
postprocessing = True
all_parameters['gfpgan_strength'] = gfpgan_parameters[
all_parameters['facetool_strength'] = facetool_parameters[
'strength'
]
all_parameters['facetool_type'] = facetool_parameters[
'type'
]
progress.set_current_status('Saving Image')
self.socketio.emit(
@ -736,6 +751,7 @@ class InvokeAIWebServer:
'height',
'extra',
'seamless',
'hires_fix',
]
rfc_dict = {}
@ -748,14 +764,16 @@ class InvokeAIWebServer:
postprocessing = []
# 'postprocessing' is either null or an
if 'gfpgan_strength' in parameters:
postprocessing.append(
{
'type': 'gfpgan',
'strength': float(parameters['gfpgan_strength']),
if 'facetool_strength' in parameters:
facetool_parameters = {
'type': str(parameters['facetool_type']),
'strength': float(parameters['facetool_strength']),
}
)
if parameters['facetool_type'] == 'codeformer':
facetool_parameters['fidelity'] = float(parameters['codeformer_fidelity'])
postprocessing.append(facetool_parameters)
if 'upscale' in parameters:
postprocessing.append(
@ -774,7 +792,7 @@ class InvokeAIWebServer:
rfc_dict['sampler'] = parameters['sampler_name']
# display weighted subprompts (liable to change)
subprompts = split_weighted_subprompts(parameters['prompt'])
subprompts = split_weighted_subprompts(parameters['prompt'], skip_normalize=True)
subprompts = [{'prompt': x[0], 'weight': x[1]} for x in subprompts]
rfc_dict['prompt'] = subprompts
@ -850,8 +868,15 @@ class InvokeAIWebServer:
elif parameters['type'] == 'gfpgan':
postprocessing_metadata['type'] = 'gfpgan'
postprocessing_metadata['strength'] = parameters[
'gfpgan_strength'
'facetool_strength'
]
elif parameters['type'] == 'codeformer':
postprocessing_metadata['type'] = 'codeformer'
postprocessing_metadata['strength'] = parameters[
'facetool_strength'
]
postprocessing_metadata['fidelity'] = parameters['codeformer_fidelity']
else:
raise TypeError(f"Invalid type: {parameters['type']}")

View File

@ -1,6 +1,6 @@
import argparse
import os
from ldm.dream.args import PRECISION_CHOICES
from ldm.invoke.args import PRECISION_CHOICES
def create_cmd_parser():

View File

@ -15,7 +15,7 @@ SAMPLER_CHOICES = [
def parameters_to_command(params):
"""
Converts dict of parameters into a `dream.py` REPL command.
Converts dict of parameters into a `invoke.py` REPL command.
"""
switches = list()
@ -36,6 +36,8 @@ def parameters_to_command(params):
switches.append(f'-A {params["sampler_name"]}')
if "seamless" in params and params["seamless"] == True:
switches.append(f"--seamless")
if "hires_fix" in params and params["hires_fix"] == True:
switches.append(f"--hires")
if "init_img" in params and len(params["init_img"]) > 0:
switches.append(f'-I {params["init_img"]}')
if "init_mask" in params and len(params["init_mask"]) > 0:
@ -46,8 +48,14 @@ def parameters_to_command(params):
switches.append(f'-f {params["strength"]}')
if "fit" in params and params["fit"] == True:
switches.append(f"--fit")
if "gfpgan_strength" in params and params["gfpgan_strength"]:
if "facetool" in params:
switches.append(f'-ft {params["facetool"]}')
if "facetool_strength" in params and params["facetool_strength"]:
switches.append(f'-G {params["facetool_strength"]}')
elif "gfpgan_strength" in params and params["gfpgan_strength"]:
switches.append(f'-G {params["gfpgan_strength"]}')
if "codeformer_fidelity" in params:
switches.append(f'-cf {params["codeformer_fidelity"]}')
if "upscale" in params and params["upscale"]:
switches.append(f'-U {params["upscale"][0]} {params["upscale"][1]}')
if "variation_amount" in params and params["variation_amount"] > 0:

View File

@ -30,10 +30,10 @@ from send2trash import send2trash
from ldm.generate import Generate
from ldm.dream.restoration import Restoration
from ldm.dream.pngwriter import PngWriter, retrieve_metadata
from ldm.dream.args import APP_ID, APP_VERSION, calculate_init_img_hash
from ldm.dream.conditioning import split_weighted_subprompts
from ldm.invoke.restoration import Restoration
from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
from ldm.invoke.args import APP_ID, APP_VERSION, calculate_init_img_hash
from ldm.invoke.conditioning import split_weighted_subprompts
from modules.parameters import parameters_to_command
@ -125,7 +125,7 @@ class CanceledException(Exception):
try:
gfpgan, codeformer, esrgan = None, None, None
from ldm.dream.restoration.base import Restoration
from ldm.invoke.restoration.base import Restoration
restoration = Restoration()
gfpgan, codeformer = restoration.load_face_restore_models()
@ -164,7 +164,7 @@ init_image_path = os.path.join(result_path, "init-images/")
mask_image_path = os.path.join(result_path, "mask-images/")
# txt log
log_path = os.path.join(result_path, "dream_log.txt")
log_path = os.path.join(result_path, "invoke_log.txt")
# make all output paths
[
@ -349,7 +349,7 @@ def handle_run_gfpgan_event(original_image, gfpgan_parameters):
eventlet.sleep(0)
image = gfpgan.process(
image=image, strength=gfpgan_parameters["gfpgan_strength"], seed=seed
image=image, strength=gfpgan_parameters["facetool_strength"], seed=seed
)
progress["currentStatus"] = "Saving image"
@ -464,7 +464,7 @@ def parameters_to_post_processed_image_metadata(parameters, original_image_path,
image["strength"] = parameters["upscale"][1]
elif type == "gfpgan":
image["type"] = "gfpgan"
image["strength"] = parameters["gfpgan_strength"]
image["strength"] = parameters["facetool_strength"]
else:
raise TypeError(f"Invalid type: {type}")
@ -493,6 +493,7 @@ def parameters_to_generated_image_metadata(parameters):
"height",
"extra",
"seamless",
"hires_fix",
]
rfc_dict = {}
@ -505,10 +506,10 @@ def parameters_to_generated_image_metadata(parameters):
postprocessing = []
# 'postprocessing' is either null or an
if "gfpgan_strength" in parameters:
if "facetool_strength" in parameters:
postprocessing.append(
{"type": "gfpgan", "strength": float(parameters["gfpgan_strength"])}
{"type": "gfpgan", "strength": float(parameters["facetool_strength"])}
)
if "upscale" in parameters:
@ -751,7 +752,7 @@ def generate_images(generation_parameters, esrgan_parameters, gfpgan_parameters)
image=image, strength=gfpgan_parameters["strength"], seed=seed
)
postprocessing = True
all_parameters["gfpgan_strength"] = gfpgan_parameters["strength"]
all_parameters["facetool_strength"] = gfpgan_parameters["strength"]
progress["currentStatus"] = "Saving image"
socketio.emit("progressUpdate", progress)

View File

@ -9,10 +9,12 @@
laion400m:
config: configs/latent-diffusion/txt2img-1p4B-eval.yaml
weights: models/ldm/text2img-large/model.ckpt
description: Latent Diffusion LAION400M model
width: 256
height: 256
stable-diffusion-1.4:
config: configs/stable-diffusion/v1-inference.yaml
weights: models/ldm/stable-diffusion-v1/model.ckpt
description: Stable Diffusion inference model version 1.4
width: 512
height: 512
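
For illustration, an entry for an additional model follows the same schema. The stanza below is a hypothetical example; it simply reuses the waifu-diffusion weights path and description that appear later in this commit's CLI documentation.

```yaml
# Hypothetical example entry using the same keys as above
waifu-diffusion:
  config: configs/stable-diffusion/v1-inference.yaml
  weights: models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
  description: Waifu Diffusion v1.3
  width: 512
  height: 512
```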

View File

@ -32,7 +32,7 @@ model:
placeholder_strings: ["*"]
initializer_words: ['face', 'man', 'photo', 'africanmale']
per_image_tokens: false
num_vectors_per_token: 6
num_vectors_per_token: 1
progressive_words: False
unet_config:

View File

@ -1,51 +1,106 @@
# **Changelog**
---
title: Changelog
---
## v1.13 (in process)
# :octicons-log-16: **Changelog**
- Supports a Google Colab notebook for a standalone server running on Google hardware [Arturo Mendivil](https://github.com/artmen1516)
- WebUI supports GFPGAN/ESRGAN facial reconstruction and upscaling [Kevin Gibbons](https://github.com/bakkot)
- WebUI supports incremental display of in-progress images during generation [Kevin Gibbons](https://github.com/bakkot)
- Output directory can be specified on the dream> command line.
- The grid was displaying duplicated images when not enough images to fill the final row [Muhammad Usama](https://github.com/SMUsamaShah)
- Can specify --grid on dream.py command line as the default.
- Miscellaneous internal bug and stability fixes.
## v2.0.1 (13 October 2022)
- fix noisy images at high step count when using k* samplers
- dream.py script now calls invoke.py module directly rather than
via a new python process (which could break the environment)
## v2.0.0 <small>(9 October 2022)</small>
- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
for backward compatibility.
- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
- Support for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/INPAINTING.md">inpainting</a> and <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OUTPAINTING.md">outpainting</a>
- img2img runs on all k* samplers
- Support for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/PROMPTS.md#negative-and-unconditioned-prompts">negative prompts</a>
- Support for CodeFormer face reconstruction
- Support for Textual Inversion on Macintoshes
- Support in both WebGUI and CLI for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/POSTPROCESS.md">post-processing of previously-generated images</a>
using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
and "embiggen" upscaling. See the `!fix` command.
- New `--hires` option on `invoke>` line allows <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/CLI.md#this-is-an-example-of-txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
- New `--perlin` and `--threshold` options allow you to add and control variation
during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>
- Extensive metadata now written into PNG files, allowing reliable regeneration of images
and tweaking of previous settings.
- Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
- Improved <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/CLI.md">command-line completion behavior</a>.
New commands added:
* List command-line history with `!history`
* Search command-line history with `!search`
* Clear history with `!clear`
- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
configure. To switch away from auto use the new flag like `--precision=float32`.
## v1.14 <small>(11 September 2022)</small>
- Memory optimizations for small-RAM cards. 512x512 now possible on 4 GB GPUs.
- Full support for Apple hardware with M1 or M2 chips.
- Add "seamless mode" for circular tiling of image. Generates beautiful effects.
([prixt](https://github.com/prixt)).
- Inpainting support.
- Improved web server GUI.
- Lots of code and documentation cleanups.
## v1.13 <small>(3 September 2022)</small>
- Support image variations (see [VARIATIONS](features/VARIATIONS.md))
([Kevin Gibbons](https://github.com/bakkot) and many contributors and reviewers)
- Supports a Google Colab notebook for a standalone server running on Google hardware
[Arturo Mendivil](https://github.com/artmen1516)
- WebUI supports GFPGAN/ESRGAN facial reconstruction and upscaling
[Kevin Gibbons](https://github.com/bakkot)
- WebUI supports incremental display of in-progress images during generation
[Kevin Gibbons](https://github.com/bakkot)
- A new configuration file scheme that allows new models (including upcoming
stable-diffusion-v1.5) to be added without altering the code.
([David Wager](https://github.com/maddavid12))
- Can specify --grid on invoke.py command line as the default.
- Miscellaneous internal bug and stability fixes.
- Works on M1 Apple hardware.
- Multiple bug fixes.
---
## v1.12 (28 August 2022)
## v1.12 <small>(28 August 2022)</small>
- Improved file handling, including ability to read prompts from standard input.
(kudos to [Yunsaki](https://github.com/yunsaki)
- The web server is now integrated with the dream.py script. Invoke by adding --web to
the dream.py command arguments.
- The web server is now integrated with the invoke.py script. Invoke by adding --web to
the invoke.py command arguments.
- Face restoration and upscaling via GFPGAN and Real-ESRGAN are now automatically
enabled if the GFPGAN directory is located as a sibling to Stable Diffusion.
VRAM requirements are modestly reduced. Thanks to both [Blessedcoolant](https://github.com/blessedcoolant) and
[Oceanswave](https://github.com/oceanswave) for their work on this.
- You can now swap samplers on the dream> command line. [Blessedcoolant](https://github.com/blessedcoolant)
- You can now swap samplers on the invoke> command line. [Blessedcoolant](https://github.com/blessedcoolant)
---
## v1.11 (26 August 2022)
## v1.11 <small>(26 August 2022)</small>
- NEW FEATURE: Support upscaling and face enhancement using the GFPGAN module. (kudos to [Oceanswave](https://github.com/Oceanswave)
- You now can specify a seed of -1 to use the previous image's seed, -2 to use the seed for the image generated before that, etc.
Seed memory only extends back to the previous command, but will work on all images generated with the -n# switch.
- Variant generation support temporarily disabled pending more general solution.
- Created a feature branch named **yunsaki-morphing-dream** which adds experimental support for
- Created a feature branch named **yunsaki-morphing-invoke** which adds experimental support for
iteratively modifying the prompt and its parameters. Please see [Pull Request #86](https://github.com/lstein/stable-diffusion/pull/86)
for a synopsis of how this works. Note that when this feature is eventually added to the main branch, it may be modified
significantly.
---
## v1.10 (25 August 2022)
## v1.10 <small>(25 August 2022)</small>
- A barebones but fully functional interactive web server for online generation of txt2img and img2img.
---
## v1.09 (24 August 2022)
## v1.09 <small>(24 August 2022)</small>
- A new -v option allows you to generate multiple variants of an initial image
in img2img mode. (kudos to [Oceanswave](https://github.com/Oceanswave). [
@ -55,9 +110,9 @@
---
## v1.08 (24 August 2022)
## v1.08 <small>(24 August 2022)</small>
- Escape single quotes on the dream> command before trying to parse. This avoids
- Escape single quotes on the invoke> command before trying to parse. This avoids
parse errors.
- Removed instruction to get Python3.8 as first step in Windows install.
Anaconda3 does it for you.
@ -66,7 +121,7 @@
---
## v1.07 (23 August 2022)
## v1.07 <small>(23 August 2022)</small>
- Image filenames will now never fill gaps in the sequence, but will be assigned the
next higher name in the chosen directory. This ensures that the alphabetic and chronological
@ -74,14 +129,14 @@
---
## v1.06 (23 August 2022)
## v1.06 <small>(23 August 2022)</small>
- Added weighted prompt support contributed by [xraxra](https://github.com/xraxra)
- Example of using weighted prompts to tweak a demonic figure contributed by [bmaltais](https://github.com/bmaltais)
---
## v1.05 (22 August 2022 - after the drop)
## v1.05 <small>(22 August 2022 - after the drop)</small>
- Filenames now use the following formats:
000010.95183149.png -- Two files produced by the same command (e.g. -n2),
@ -94,12 +149,12 @@
be regenerated with the indicated key
- It should no longer be possible for one image to overwrite another
- You can use the "cd" and "pwd" commands at the dream> prompt to set and retrieve
- You can use the "cd" and "pwd" commands at the invoke> prompt to set and retrieve
the path of the output directory.
---
## v1.04 (22 August 2022 - after the drop)
## v1.04 <small>(22 August 2022 - after the drop)</small>
- Updated README to reflect installation of the released weights.
- Suppressed very noisy and inconsequential warning when loading the frozen CLIP
@ -107,14 +162,14 @@
---
## v1.03 (22 August 2022)
## v1.03 <small>(22 August 2022)</small>
- The original txt2img and img2img scripts from the CompVis repository have been moved into
a subfolder named "orig_scripts", to reduce confusion.
---
## v1.02 (21 August 2022)
## v1.02 <small>(21 August 2022)</small>
- A copy of the prompt and all of its switches and options is now stored in the corresponding
image in a tEXt metadata field named "Dream". You can read the prompt using scripts/images2prompt.py,
@ -123,15 +178,15 @@
---
## v1.01 (21 August 2022)
## v1.01 <small>(21 August 2022)</small>
- added k_lms sampling.
**Please run "conda env update" to load the k_lms dependencies!!**
- use half precision arithmetic by default, resulting in faster execution and lower memory requirements
Pass argument --full_precision to dream.py to get slower but more accurate image generation
Pass argument --full_precision to invoke.py to get slower but more accurate image generation
---
## Links
- **[Read Me](../readme.md)**
- **[Read Me](index.md)**

(25 binary image files added, not shown; sizes range from 11 KiB to 1.1 MiB.)

View File

@ -4,7 +4,7 @@ title: Changelog
# :octicons-log-16: Changelog
## v1.13 <small>(in process)</small>
## v1.13
- Supports a Google Colab notebook for a standalone server running on Google
hardware [Arturo Mendivil](https://github.com/artmen1516)
@ -12,10 +12,10 @@ title: Changelog
[Kevin Gibbons](https://github.com/bakkot)
- WebUI supports incremental display of in-progress images during generation
[Kevin Gibbons](https://github.com/bakkot)
- Output directory can be specified on the dream> command line.
- Output directory can be specified on the invoke> command line.
- The grid was displaying duplicated images when not enough images to fill the
final row [Muhammad Usama](https://github.com/SMUsamaShah)
- Can specify --grid on dream.py command line as the default.
- Can specify --grid on invoke.py command line as the default.
- Miscellaneous internal bug and stability fixes.
---
@ -24,14 +24,14 @@ title: Changelog
- Improved file handling, including ability to read prompts from standard input.
(kudos to [Yunsaki](https://github.com/yunsaki)
- The web server is now integrated with the dream.py script. Invoke by adding
--web to the dream.py command arguments.
- The web server is now integrated with the invoke.py script. Invoke by adding
--web to the invoke.py command arguments.
- Face restoration and upscaling via GFPGAN and Real-ESRGAN are now automatically
enabled if the GFPGAN directory is located as a sibling to Stable Diffusion.
VRAM requirements are modestly reduced. Thanks to both
[Blessedcoolant](https://github.com/blessedcoolant) and
[Oceanswave](https://github.com/oceanswave) for their work on this.
- You can now swap samplers on the dream> command line.
- You can now swap samplers on the invoke> command line.
[Blessedcoolant](https://github.com/blessedcoolant)
---
@ -45,7 +45,7 @@ title: Changelog
back to the previous command, but will work on all images generated with the
-n# switch.
- Variant generation support temporarily disabled pending more general solution.
- Created a feature branch named **yunsaki-morphing-dream** which adds
- Created a feature branch named **yunsaki-morphing-invoke** which adds
experimental support for iteratively modifying the prompt and its parameters.
Please see [Pull Request #86](https://github.com/lstein/stable-diffusion/pull/86) for
@ -75,7 +75,7 @@ title: Changelog
## v1.08 <small>(24 August 2022)</small>
- Escape single quotes on the dream> command before trying to parse. This avoids
- Escape single quotes on the invoke> command before trying to parse. This avoids
parse errors.
- Removed instruction to get Python3.8 as first step in Windows install.
Anaconda3 does it for you.
@ -112,7 +112,7 @@ title: Changelog
can be regenerated with the indicated key
- It should no longer be possible for one image to overwrite another
- You can use the "cd" and "pwd" commands at the dream> prompt to set and
- You can use the "cd" and "pwd" commands at the invoke> prompt to set and
retrieve the path of the output directory.
## v1.04 <small>(22 August 2022 - after the drop)</small>
@ -139,5 +139,5 @@ title: Changelog
- added k_lms sampling. **Please run "conda env update -f environment.yaml" to
load the k_lms dependencies!!**
- use half precision arithmetic by default, resulting in faster execution and
lower memory requirements Pass argument --full_precision to dream.py to get
lower memory requirements Pass argument --full_precision to invoke.py to get
slower but more accurate image generation

View File

@ -8,8 +8,8 @@ hide:
## **Interactive Command Line Interface**
The `dream.py` script, located in `scripts/dream.py`, provides an interactive
interface to image generation similar to the "dream mothership" bot that Stable
The `invoke.py` script, located in `scripts/invoke.py`, provides an interactive
interface to image generation similar to the "invoke mothership" bot that Stable
AI provided on its Discord server.
Unlike the `txt2img.py` and `img2img.py` scripts provided in the original
@ -34,33 +34,33 @@ The script is confirmed to work on Linux, Windows and Mac systems.
currently rudimentary, but a much better replacement is on its way.
```bash
(ldm) ~/stable-diffusion$ python3 ./scripts/dream.py
(invokeai) ~/stable-diffusion$ python3 ./scripts/invoke.py
* Initializing, be patient...
Loading model from models/ldm/text2img-large/model.ckpt
(...more initialization messages...)
* Initialization done! Awaiting your command...
dream> ashley judd riding a camel -n2 -s150
invoke> ashley judd riding a camel -n2 -s150
Outputs:
outputs/img-samples/00009.png: "ashley judd riding a camel" -n2 -s150 -S 416354203
outputs/img-samples/00010.png: "ashley judd riding a camel" -n2 -s150 -S 1362479620
dream> "there's a fly in my soup" -n6 -g
invoke> "there's a fly in my soup" -n6 -g
outputs/img-samples/00011.png: "there's a fly in my soup" -n6 -g -S 2685670268
seeds for individual rows: [2685670268, 1216708065, 2335773498, 822223658, 714542046, 3395302430]
dream> q
invoke> q
# this shows how to retrieve the prompt stored in the saved image's metadata
(ldm) ~/stable-diffusion$ python ./scripts/images2prompt.py outputs/img_samples/*.png
(invokeai) ~/stable-diffusion$ python ./scripts/images2prompt.py outputs/img_samples/*.png
00009.png: "ashley judd riding a camel" -s150 -S 416354203
00010.png: "ashley judd riding a camel" -s150 -S 1362479620
00011.png: "there's a fly in my soup" -n6 -g -S 2685670268
```
![dream-py-demo](../assets/dream-py-demo.png)
![invoke-py-demo](../assets/dream-py-demo.png)
The `dream>` prompt's arguments are pretty much identical to those used in the
Discord bot, except you don't need to type "!dream" (it doesn't hurt if you do).
The `invoke>` prompt's arguments are pretty much identical to those used in the
Discord bot, except you don't need to type `!invoke` (it doesn't hurt if you do).
A significant change is that creation of individual images is now the default
unless `--grid` (`-g`) is given. A full list is given in
[List of prompt arguments](#list-of-prompt-arguments).
@ -73,10 +73,9 @@ the location of the model weight files.
### List of arguments recognized at the command line
These command-line arguments can be passed to `dream.py` when you first run it
These command-line arguments can be passed to `invoke.py` when you first run it
from the Windows, Mac or Linux command line. Some set defaults that can be
overridden on a per-prompt basis (see [List of prompt arguments]
(#list-of-prompt-arguments). Others
overridden on a per-prompt basis (see [List of prompt arguments](#list-of-prompt-arguments)). Others
| Argument <img width="240" align="right"/> | Shortcut <img width="100" align="right"/> | Default <img width="320" align="right"/> | Description |
| ----------------------------------------- | ----------------------------------------- | ---------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
@ -86,6 +85,7 @@ overridden on a per-prompt basis (see [List of prompt arguments]
| `--from_file <path>` | | `None` | Read list of prompts from a file. Use `-` to read from standard input |
| `--model <modelname>` | | `stable-diffusion-1.4` | Loads model specified in configs/models.yaml. Currently one of "stable-diffusion-1.4" or "laion400m" |
| `--full_precision` | `-F` | `False` | Run in slower full-precision mode. Needed for Macintosh M1/M2 hardware and some older video cards. |
| `--png_compression <0-9>` | `-z<0-9>` | 6 | Select level of compression for output files, from 0 (no compression) to 9 (max compression) |
| `--web` | | `False` | Start in web server mode |
| `--host <ip addr>` | | `localhost` | Which network interface web server should listen on. Set to 0.0.0.0 to listen on any. |
| `--port <port>` | | `9090` | Which port web server should listen for requests on. |
@ -101,42 +101,49 @@ overridden on a per-prompt basis (see [List of prompt arguments]
| `--free_gpu_mem` | | `False` | Free GPU memory after sampling, to allow image decoding and saving in low VRAM conditions |
| `--precision` | | `auto` | Set model precision, default is selected by device. Options: auto, float32, float16, autocast |
#### deprecated
!!! warning deprecated
These arguments are deprecated but still work:
These arguments are deprecated but still work:
<div align="center" markdown>
| Argument | Shortcut | Default | Description |
|--------------------|------------|---------------------|--------------|
| --weights <path> | | None | Path to weights file; use `--model stable-diffusion-1.4` instead |
| --laion400m | -l | False | Use older LAION400m weights; use `--model=laion400m` instead |
| Argument | Shortcut | Default | Description |
|--------------------|------------|---------------------|--------------|
| `--weights <path>` | | `None` | Path to weights file; use `--model stable-diffusion-1.4` instead |
| `--laion400m` | `-l` | `False` | Use older LAION400m weights; use `--model=laion400m` instead |
**A note on path names:** On Windows systems, you may run into
problems when passing the dream script standard backslashed path
names because the Python interpreter treats "\" as an escape.
You can either double your slashes (ick): C:\\\\path\\\\to\\\\my\\\\file, or
use Linux/Mac style forward slashes (better): C:/path/to/my/file.
</div>
!!! tip
On Windows systems, you may run into
problems when passing the invoke script standard backslashed path
names because the Python interpreter treats "\" as an escape.
You can either double your slashes (ick): `C:\\path\\to\\my\\file`, or
use Linux/Mac style forward slashes (better): `C:/path/to/my/file`.
## List of prompt arguments
After the dream.py script initializes, it will present you with a
**dream>** prompt. Here you can enter information to generate images
from text (txt2img), to embellish an existing image or sketch
(img2img), or to selectively alter chosen regions of the image
(inpainting).
After the invoke.py script initializes, it will present you with an
`invoke>` prompt. Here you can enter information to generate images
from text ([txt2img](#txt2img)), to embellish an existing image or sketch
([img2img](#img2img)), or to selectively alter chosen regions of the image
([inpainting](#inpainting)).
### This is an example of txt2img:
### txt2img
~~~~
dream> waterfall and rainbow -W640 -H480
~~~~
!!! example
This will create the requested image with the dimensions 640 (width)
and 480 (height).
```bash
invoke> waterfall and rainbow -W640 -H480
```
Here are the dream> commands that apply to txt2img:
This will create the requested image with the dimensions 640 (width)
and 480 (height).
| Argument | Shortcut | Default | Description |
Here are the invoke> commands that apply to txt2img:
| Argument <img width="680" align="right"/> | Shortcut <img width="420" align="right"/> | Default <img width="480" align="right"/> | Description |
|--------------------|------------|---------------------|--------------|
| "my prompt" | | | Text prompt to use. The quotation marks are optional. |
| --width <int> | -W<int> | 512 | Width of generated image |
@ -147,17 +154,22 @@ Here are the dream> command that apply to txt2img:
| --seed <int> | -S<int> | None | Set the random seed for the next series of images. This can be used to recreate an image generated previously.|
| --sampler <sampler>| -A<sampler>| k_lms | Sampler to use. Use -h to get list of available samplers. |
| --hires_fix | | | Larger images often have duplication artefacts. This option suppresses duplicates by generating the image at low res, and then using img2img to increase the resolution |
| --png_compression <0-9> | -z<0-9> | 6 | Select level of compression for output files, from 0 (no compression) to 9 (max compression) |
| --grid | -g | False | Turn on grid mode to return a single image combining all the images generated by this prompt |
| --individual | -i | True | Turn off grid mode (deprecated; leave off --grid instead) |
| --outdir <path> | -o<path> | outputs/img_samples | Temporarily change the location of these images |
| --seamless | | False | Activate seamless tiling for interesting effects |
| --seamless_axes | | x,y | Specify which axes to use circular convolution on. |
| --log_tokenization | -t | False | Display a color-coded list of the parsed tokens derived from the prompt |
| --skip_normalization| -x | False | Weighted subprompts will not be normalized. See [Weighted Prompts](./OTHER.md#weighted-prompts) |
| --upscale <int> <float> | -U <int> <float> | -U 1 0.75| Upscale image by magnification factor (2, 4), and set strength of upscaling (0.0-1.0). If strength not set, will default to 0.75. |
| --gfpgan_strength <float> | -G <float> | -G0 | Fix faces using the GFPGAN algorithm; argument indicates how hard the algorithm should try (0.0-1.0) |
| --facetool_strength <float> | -G <float> | -G0 | Fix faces (defaults to using the GFPGAN algorithm); argument indicates how hard the algorithm should try (0.0-1.0) |
| --facetool <name> | -ft <name> | -ft gfpgan | Select face restoration algorithm to use: gfpgan, codeformer |
| --codeformer_fidelity | -cf <float> | 0.75 | Used along with CodeFormer. Takes values between 0 and 1. 0 produces high quality but low accuracy. 1 produces high accuracy but low quality |
| --save_original | -save_orig| False | When upscaling or fixing faces, this will cause the original image to be saved rather than replaced. |
| --variation <float> |-v<float>| 0.0 | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with -S<seed> and -n<int> to generate a series a riffs on a starting image. See [Variations](./VARIATIONS.md). |
| --with_variations <pattern> | -V<pattern>| None | Combine two or more variations. See [Variations](./VARIATIONS.md) for how to use this. |
| --with_variations <pattern> | | None | Combine two or more variations. See [Variations](./VARIATIONS.md) for how to use this. |
| --save_intermediates <n> | | None | Save the image from every nth step into an "intermediates" folder inside the output directory |
Note that the width and height of the image must be multiples of
64. You can provide different values, but they will be rounded down to
@ -167,7 +179,7 @@ the nearest multiple of 64.
### This is an example of img2img:
~~~~
dream> waterfall and rainbow -I./vacation-photo.png -W640 -H480 --fit
invoke> waterfall and rainbow -I./vacation-photo.png -W640 -H480 --fit
~~~~
This will modify the indicated vacation photograph by making it more
@ -179,69 +191,286 @@ photo and you may run out of memory if it is large.
In addition to the command-line options recognized by txt2img, img2img
accepts additional options:
| Argument | Shortcut | Default | Description |
|--------------------|------------|---------------------|--------------|
| --init_img <path> | -I<path> | None | Path to the initialization image |
| --fit | -F | False | Scale the image to fit into the specified -H and -W dimensions |
| --strength <float> | -s<float> | 0.75 | How hard to try to match the prompt to the initial image. Ranges from 0.0-0.99, with higher values replacing the initial image completely.|
| Argument <img width="160" align="right"/> | Shortcut | Default | Description |
|----------------------|-------------|-----------------|--------------|
| `--init_img <path>` | `-I<path>` | `None` | Path to the initialization image |
| `--fit` | `-F` | `False` | Scale the image to fit into the specified -H and -W dimensions |
| `--strength <float>` | `-s<float>` | `0.75` | How hard to try to match the prompt to the initial image. Ranges from 0.0-0.99, with higher values replacing the initial image completely.|
### This is an example of inpainting:
### inpainting
~~~~
dream> waterfall and rainbow -I./vacation-photo.png -M./vacation-mask.png -W640 -H480 --fit
~~~~
!!! example
This will do the same thing as img2img, but image alterations will
only occur within transparent areas defined by the mask file specified
by -M. You may also supply just a single initial image with the areas
to overpaint made transparent, but you must be careful not to destroy
the pixels underneath when you create the transparent areas. See
[Inpainting](./INPAINTING.md) for details.
```bash
invoke> waterfall and rainbow -I./vacation-photo.png -M./vacation-mask.png -W640 -H480 --fit
```
This will do the same thing as img2img, but image alterations will
only occur within transparent areas defined by the mask file specified
by `-M`. You may also supply just a single initial image with the areas
to overpaint made transparent, but you must be careful not to destroy
the pixels underneath when you create the transparent areas. See
[Inpainting](./INPAINTING.md) for details.
inpainting accepts all the arguments used for txt2img and img2img, as
well as the --mask (-M) argument:
well as the --mask (-M) and --text_mask (-tm) arguments:
| Argument | Shortcut | Default | Description |
| Argument <img width="100" align="right"/> | Shortcut | Default | Description |
|--------------------|------------|---------------------|--------------|
| --init_mask <path> | -M<path> | None |Path to an image the same size as the initial_image, with areas for inpainting made transparent.|
| `--init_mask <path>` | `-M<path>` | `None` |Path to an image the same size as the initial_image, with areas for inpainting made transparent.|
| `--text_mask <prompt> [<float>]` | `-tm <prompt> [<float>]` | <none> | Create a mask from a text prompt describing part of the image|
`--text_mask` (short form `-tm`) is a way to generate a mask using a
text description of the part of the image to replace. For example, if
you have an image of a breakfast plate with a bagel, toast and
scrambled eggs, you can selectively mask the bagel and replace it with
a piece of cake this way:
# Convenience commands
~~~
invoke> a piece of cake -I /path/to/breakfast.png -tm bagel
~~~
In addition to the standard image generation arguments, there are a
series of convenience commands that begin with !:
The algorithm uses <a
href="https://github.com/timojl/clipseg">clipseg</a> to classify
different regions of the image. The classifier puts out a confidence
score for each region it identifies. Generally regions that score
above 0.5 are reliable, but if you are getting too much or too little
masking you can adjust the threshold down (to get more mask), or up
(to get less). In this example, by passing `-tm` a higher value, we
are insisting on a more stringent classification.
## !fix
~~~
invoke> a piece of cake -I /path/to/breakfast.png -tm bagel 0.6
~~~
# Other Commands
The CLI offers a number of commands that begin with "!".
## Postprocessing images
To postprocess a file using face restoration or upscaling, use the
`!fix` command.
### `!fix`
This command runs a post-processor on a previously-generated image. It
takes a PNG filename or path and applies your choice of the -U, -G, or
--embiggen switches in order to fix faces or upscale. If you provide a
takes a PNG filename or path and applies your choice of the `-U`, `-G`, or
`--embiggen` switches in order to fix faces or upscale. If you provide a
filename, the script will look for it in the current output
directory. Otherwise you can provide a full or partial path to the
desired file.
Some examples:
!!! example ""

    Upscale to 4X its original size and fix faces using codeformer:

    ```bash
    invoke> !fix 0000045.4829112.png -G1 -U4 -ft codeformer
    ```

!!! example ""

    Use the GFPGAN algorithm to fix faces, then upscale to 3X using --embiggen:

    ```bash
    invoke> !fix 0000045.4829112.png -G0.8 -ft gfpgan
    >> fixing outputs/img-samples/0000045.4829112.png
    >> retrieved seed 4829112 and prompt "boy enjoying a banana split"
    >> GFPGAN - Restoring Faces for image seed:4829112
    Outputs:
    [1] outputs/img-samples/000017.4829112.gfpgan-00.png: !fix "outputs/img-samples/0000045.4829112.png" -s 50 -S -W 512 -H 512 -C 7.5 -A k_lms -G 0.8

    invoke> !fix 000017.4829112.gfpgan-00.png --embiggen 3
    ...lots of text...
    Outputs:
    [2] outputs/img-samples/000018.2273800735.embiggen-00.png: !fix "outputs/img-samples/000017.243781548.gfpgan-00.png" -s 50 -S 2273800735 -W 512 -H 512 -C 7.5 -A k_lms --embiggen 3.0 0.75 0.25
    ```
# Model selection and importation
The CLI allows you to add new models on the fly, as well as to switch
among them rapidly without leaving the script.
## !models
This prints out a list of the models defined in `config/models.yaml`.
The active model is bold-faced.
Example:
<pre>
laion400m not loaded <no description>
<b>stable-diffusion-1.4 active Stable Diffusion v1.4</b>
waifu-diffusion not loaded Waifu Diffusion v1.3
</pre>
## !switch <model>
This quickly switches from one model to another without leaving the
CLI script. `invoke.py` uses a memory caching system; once a model
has been loaded, switching back and forth is quick. The following
example shows this in action. Note how the second column of the
`!models` table changes to `cached` after a model is first loaded,
and that the long initialization step is not needed when loading
a cached model.
<pre>
invoke> !models
laion400m not loaded <no description>
<b>stable-diffusion-1.4 cached Stable Diffusion v1.4</b>
waifu-diffusion active Waifu Diffusion v1.3
invoke> !switch waifu-diffusion
>> Caching model stable-diffusion-1.4 in system RAM
>> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
| LatentDiffusion: Running in eps-prediction mode
| DiffusionWrapper has 859.52 M params.
| Making attention of type 'vanilla' with 512 in_channels
| Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
| Making attention of type 'vanilla' with 512 in_channels
| Using faster float16 precision
>> Model loaded in 18.24s
>> Max VRAM used to load the model: 2.17G
>> Current VRAM usage:2.17G
>> Setting Sampler to k_lms
invoke> !models
laion400m not loaded <no description>
stable-diffusion-1.4 cached Stable Diffusion v1.4
<b>waifu-diffusion active Waifu Diffusion v1.3</b>
invoke> !switch stable-diffusion-1.4
>> Caching model waifu-diffusion in system RAM
>> Retrieving model stable-diffusion-1.4 from system RAM cache
>> Setting Sampler to k_lms
invoke> !models
laion400m not loaded <no description>
<b>stable-diffusion-1.4 active Stable Diffusion v1.4</b>
waifu-diffusion cached Waifu Diffusion v1.3
</pre>
## !import_model <path/to/model/weights>
This command imports a new model weights file into InvokeAI, makes it
available for image generation within the script, and writes out the
configuration for the model into `config/models.yaml` for use in
subsequent sessions.
Provide `!import_model` with the path to a weights file ending in
`.ckpt`. If you type a partial path and press tab, the CLI will
autocomplete. Although it will also autocomplete to `.vae` files,
these are not currently supported (but will be soon).
When you hit return, the CLI will prompt you to fill in additional
information about the model, including the short name you wish to use
for it with the `!switch` command, a brief description of the model,
the default image width and height to use with this model, and the
model's configuration file. The latter three fields are automatically
filled with reasonable defaults. In the example below, the bold-faced
text shows what the user typed in with the exception of the width,
height and configuration file paths, which were filled in
automatically.
Example:
<pre>
invoke> <b>!import_model models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt</b>
>> Model import in process. Please enter the values needed to configure this model:
Name for this model: <b>waifu-diffusion</b>
Description of this model: <b>Waifu Diffusion v1.3</b>
Configuration file for this model: <b>configs/stable-diffusion/v1-inference.yaml</b>
Default image width: <b>512</b>
Default image height: <b>512</b>
>> New configuration:
waifu-diffusion:
config: configs/stable-diffusion/v1-inference.yaml
description: Waifu Diffusion v1.3
height: 512
weights: models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
width: 512
OK to import [n]? <b>y</b>
>> Caching model stable-diffusion-1.4 in system RAM
>> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/model-epoch08-float16.ckpt
| LatentDiffusion: Running in eps-prediction mode
| DiffusionWrapper has 859.52 M params.
| Making attention of type 'vanilla' with 512 in_channels
| Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
| Making attention of type 'vanilla' with 512 in_channels
| Using faster float16 precision
invoke>
</pre>
## !edit_model <name_of_model>
The `!edit_model` command can be used to modify a model that is
already defined in `config/models.yaml`. Call it with the short
name of the model you wish to modify, and it will allow you to
modify the model's `description`, `weights` and other fields.
Example:
<pre>
invoke> <b>!edit_model waifu-diffusion</b>
>> Editing model waifu-diffusion from configuration file ./configs/models.yaml
description: <b>Waifu diffusion v1.4beta</b>
weights: models/ldm/stable-diffusion-v1/<b>model-epoch10-float16.ckpt</b>
config: configs/stable-diffusion/v1-inference.yaml
width: 512
height: 512
>> New configuration:
waifu-diffusion:
config: configs/stable-diffusion/v1-inference.yaml
description: Waifu diffusion v1.4beta
weights: models/ldm/stable-diffusion-v1/model-epoch10-float16.ckpt
height: 512
width: 512
OK to import [n]? y
>> Caching model stable-diffusion-1.4 in system RAM
>> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/model-epoch10-float16.ckpt
...
</pre>
# History processing
The CLI provides a series of convenient commands for reviewing previous
actions, retrieving them, modifying them, and re-running them.
### `!fetch`

This command retrieves the command line used to generate a
previously-generated image, so that you can edit and re-run it. You may
provide either the name of a file in the current output directory, or a
full file path.

```bash
invoke> !fetch 0000015.8929913.png
# the script returns the next line, ready for editing and running:
invoke> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5
```
Note that this command may behave unexpectedly if given a PNG file that
was not generated by InvokeAI.
### `!history`
The invoke script keeps track of all the commands you issue during a
session, allowing you to re-run them. On Mac and Linux systems, it
also writes the command-line history out to disk, giving you access to
the most recent 1000 commands issued.
The `!history` command will return a numbered list of all the commands
issued during the session (Windows), or the most recent 1000 commands
(Mac|Linux). You can then repeat a command by using the command `!NNN`,
where "NNN" is the history line number. For example:
```bash
invoke> !history
...
[14] happy woman sitting under tree wearing broad hat and flowing garment
[15] beautiful woman sitting under tree wearing broad hat and flowing garment
[18] beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6
[20] watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
[21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
...
invoke> !20
invoke> watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
```
### !search <search string>
This is similar to !history but it only returns lines that contain
`search string`. For example:
```bash
invoke> !search surreal
[21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
```
### `!clear`
This clears the search history from memory and disk. Be advised that
this operation is irreversible and does not issue any warnings!
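A minimal invocation might look like this:

```bash
invoke> !clear
```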
## Other ! Commands
### !mask
This command takes an image, a text prompt, and uses the `clipseg`
algorithm to automatically generate a mask of the area that matches
the text prompt. It is useful for debugging the text masking process
prior to inpainting with the `--text_mask` argument. See
[INPAINTING.md](./INPAINTING.md) for details.
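For instance, you could preview which pixels a prompt selects before committing to an inpainting run. The command below reuses the portrait image that appears later in the inpainting documentation; any image path will do:

```bash
invoke> !mask ./test-pictures/curly.png -tm hair 0.5
```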
## Command-line editing and completion
The command-line offers convenient history tracking, editing, and
command completion.
- To scroll through previous commands and potentially edit/reuse them, use the ++up++ and ++down++ keys.
- To edit the current command, use the ++left++ and ++right++ keys to position the cursor, and then ++backspace++, ++delete++ or insert characters.
- To move to the very beginning of the command, type ++ctrl+a++ (or ++command+a++ on the Mac)
- To move to the end of the command, type ++ctrl+e++.
- To cut a section of the command, position the cursor where you want to start cutting and type ++ctrl+k++
- To paste a cut section back in, position the cursor where you want to paste, and type ++ctrl+y++
Windows users can get similar, but more limited, functionality if they
launch `invoke.py` with the `winpty` program and have the `pyreadline3`
library installed:
```batch
> winpty python scripts\invoke.py
```
On the Mac and Linux platforms, when you exit `invoke.py`, the last 1000
lines of your command-line history will be saved. When you restart
`invoke.py`, you can access the saved history using the ++up++ key.
In addition, limited command-line completion is installed. In various
contexts, you can start typing your command and press ++tab++. A list of
potential completions will be presented to you. You can then type a
little more, hit ++tab++ again, and eventually autocomplete what you want.
When specifying file paths using the one-letter shortcuts, the CLI
will attempt to complete pathnames for you. This is most handy for the
`-I` (init image) and `-M` (init mask) paths. To initiate completion, start
the path with a slash (`/`) or `./`. For example:
```bash
invoke> zebra with a mustache -I./test-pictures<TAB>
-I./test-pictures/Lincoln-and-Parrot.png -I./test-pictures/zebra.jpg -I./test-pictures/madonna.png
-I./test-pictures/bad-sketch.png -I./test-pictures/man_with_eagle/
```
It's similar to that, except it can work up to an arbitrarily large size
has extra logic to re-run any number of the tile sub-sections of the image
if for example a small part of a huge run got messed up.
### Usage
`-embiggen <scaling_factor> <esrgan_strength> <overlap_ratio OR overlap_pixels>`
Tiles are numbered starting with one, and left-to-right,
top-to-bottom. So, if you are generating a 3x3 tiled image, the
middle row would be `4 5 6`.
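For instance, reusing the `outputs/000002.seed.png` file from the examples below, just that middle row could be re-run with something along these lines (the prompt and settings here are only illustrative):

```bash
invoke> a photo of a forest at sunset -s 100 -W 512 -H 512 -I outputs/000002.seed.png -f 0.5 -embiggen_tiles 4 5 6
```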
### Examples
!!! example ""

    Running Embiggen with 512x512 tiles on an existing image, scaling up by a factor of 2.5x;
    and doing the same again (default ESRGAN strength is 0.75, default overlap between tiles is 0.25):

    ```bash
    invoke > a photo of a forest at sunset -s 100 -W 512 -H 512 -I outputs/forest.png -f 0.4 -embiggen 2.5
    invoke > a photo of a forest at sunset -s 100 -W 512 -H 512 -I outputs/forest.png -f 0.4 -embiggen 2.5 0.75 0.25
    ```

    If your starting image was also 512x512 this should have taken 9 tiles.

!!! example ""

    If there weren't enough clouds in the sky of that forest you just made
    (and that image is about 1280 pixels (512*2.5) wide A.K.A. three
    512x512 tiles with 0.25 overlaps wide) we can replace that top row of
    tiles:

    ```bash
    invoke> a photo of puffy clouds over a forest at sunset -s 100 -W 512 -H 512 -I outputs/000002.seed.png -f 0.5 -embiggen_tiles 1 2 3
    ```
## Fixing Previously-Generated Images
The `!fix` command will look up the original prompt and provide an initial image. Just use the
syntax `!fix path/to/file.png <embiggen>`. For example, you can rewrite the
previous command to look like this:
```bash
invoke> !fix ./outputs/000002.seed.png -embiggen_tiles 1 2 3
```
A new file named `000002.seed.fixed.png` will be created in the output directory. Note that
the `!fix` command does not replace the original file, unlike the behavior at generate time.
You do not need to provide the prompt, and `!fix` automatically selects a good strength for
embiggen-ing.
!!! note

    Because the same prompt is used on all the tiled images, and the model
    doesn't have the context of anything outside the tile being run - it
    can end up creating repeated pattern (also called 'motifs') across all
    the tiles based on that prompt. The best way to combat this is
    lowering the `--strength` (`-f`) to stay more true to the init image,
    and increasing the number of steps so there is more compute-time to
    create the detail. Anecdotally `--strength` 0.35-0.45 works pretty
    well on most things. It may also work great in some examples even with
    the `--strength` set high for patterns, landscapes, or subjects that
    are more abstract. Because this is (relatively) fast, you can also
    preserve the best parts from each.
Author: [Travco](https://github.com/travco)
title: Image-to-Image
---
# :material-image-multiple: Image-to-Image
## `img2img`
This script also provides an `img2img` feature that lets you seed your creations with an initial
drawing or photo. This is a really cool feature that tells stable diffusion to build the prompt on
top of the image you provide, preserving the original's basic shape and layout. To use it, provide
the `--init_img` option as shown here:
```commandline
dream> "waterfall and rainbow" --init_img=./init-images/crude_drawing.png --strength=0.5 -s100 -n4
tree on a hill with a river, nature photograph, national geographic -I./test-pictures/tree-and-river-sketch.png -f 0.85
```
This will take the original image shown here:
<div align="center" markdown>
<img src="https://user-images.githubusercontent.com/50542132/193946000-c42a96d8-5a74-4f8a-b4c3-5213e6cadcce.png" width=350>
</div>
and generate a new image based on it as shown here:
<div align="center" markdown>
<img src="https://user-images.githubusercontent.com/111189/194135515-53d4c060-e994-4016-8121-7c685e281ac9.png" width=350>
</div>
The `--init_img` (`-I`) option gives the path to the seed picture. `--strength` (`-f`) controls how much
the original will be modified, ranging from `0.0` (keep the original intact), to `1.0` (ignore the
original completely). The default is `0.75`, and ranges from `0.25-0.90` give interesting results.
Other relevant options include `-C` (classification free guidance scale), and `-s` (steps). Unlike `txt2img`,
adding steps will continuously change the resulting image and it will not converge.
You may also pass a `-v<variation_amount>` option to generate `-n<iterations>` count variants on
the original image. This is done by passing the first generated image
back into img2img the requested number of times. It generates
interesting variants.
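As a sketch of how those two flags combine (the values here are arbitrary, and the init image is the same sketch used above), this asks for five variants that each wander a small distance from the first result:

```bash
invoke> tree on a hill with a river, nature photograph, national geographic -I./test-pictures/tree-and-river-sketch.png -f 0.85 -v0.1 -n5
```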
Note that the prompt makes a big difference. For example, this slight variation on the prompt produces
a very different image:
`photograph of a tree on a hill with a river`
<div align="center" markdown>
<img src="https://user-images.githubusercontent.com/111189/194135220-16b62181-b60c-4248-8989-4834a8fd7fbd.png" width=350>
</div>
!!! tip

    When designing prompts, think about how the images scraped from the internet were captioned. Very few photographs will
    be labeled "photograph" or "photorealistic." They will, however, be captioned with the publication, photographer, camera
    model, or film settings.
If the initial image contains transparent regions, then Stable Diffusion will only draw within the
transparent regions, a process called "inpainting". However, for this to work correctly, the color
transparent regions, a process called [`inpainting`](./INPAINTING.md#creating-transparent-regions-for-inpainting). However, for this to work correctly, the color
information underneath the transparent needs to be preserved, not erased.
More details can be found here:
[Creating Transparent Images For Inpainting](./INPAINTING.md#creating-transparent-regions-for-inpainting)
!!! warning

    **IMPORTANT ISSUE** `img2img` does not work properly on initial images smaller than 512x512. Please scale your
    image to at least 512x512 before using it. Larger images are not a problem, but may run out of VRAM on your
    GPU card. To fix this, use the `--fit` option, which downscales the initial image to fit within the box specified
    by width x height:

    ~~~
    tree on a hill with a river, national geographic -I./test-pictures/big-sketch.png -H512 -W512 --fit
    ~~~
## How does it actually work, though?
The main difference between `img2img` and `prompt2img` is the starting point. While `prompt2img` always starts with pure
gaussian noise and progressively refines it over the requested number of steps, `img2img` skips some of these earlier steps
(how many it skips is indirectly controlled by the `--strength` parameter), and uses instead your initial image mixed with gaussian noise as the starting image.
**Let's start** by thinking about vanilla `prompt2img`, just generating an image from a prompt. If the step count is 10, then the "latent space" (Stable Diffusion's internal representation of the image) for the prompt "fire" with seed `1592514025` develops something like this:
```commandline
dream> "fire" -s10 -W384 -H384 -S1592514025
invoke> "fire" -s10 -W384 -H384 -S1592514025
```
<div align="center" markdown>
![latent steps](../assets/img2img/000019.steps.png)
</div>
Put simply: starting from a frame of fuzz/static, SD finds details in each frame that it thinks look like "fire" and brings them a little bit more into focus, gradually scrubbing out the fuzz until a clear image remains.

**When you use `img2img`** some of the earlier steps are cut, and instead an initial image of your choice is used. But because of how the maths behind Stable Diffusion works, this image needs to be mixed with just the right amount of noise (fuzz/static) for where it is being inserted. This is where the strength parameter comes in. Depending on the set strength, your image will be inserted into the sequence at the appropriate point, with just the right amount of noise.

### A concrete example

I want SD to draw a fire based on this hand-drawn image:
<div align="center" markdown>
![drawing of a fireplace](../assets/img2img/fire-drawing.png)
</div>
Let's only do 10 steps, to make it easier to see what's happening. If strength is `0.7`, this is what the internal steps the algorithm has to take will look like:
<div align="center" markdown>
![gravity32](../assets/img2img/000032.steps.gravity.png)
</div>

With strength `0.4`, the steps look more like this:

<div align="center" markdown>
![gravity30](../assets/img2img/000030.steps.gravity.png)
</div>
Notice how much more fuzzy the starting image is for strength `0.7` compared to `0.4`, and notice also how much longer the sequence is with `0.7`:
| | strength = 0.7 | strength = 0.4 |
| -- | -- | -- |
| initial image that SD sees | ![](../assets/img2img/000032.step-0.png) | ![](../assets/img2img/000030.step-0.png) |
| steps argument to `invoke>` | `-s10` | `-s10` |
| steps actually taken | 7 | 4 |
| latent space at each step | ![gravity32](../assets/img2img/000032.steps.gravity.png) | ![gravity30](../assets/img2img/000030.steps.gravity.png) |
| output | ![000032.1592514025](../assets/img2img/000032.1592514025.png) | ![000030.1592514025](../assets/img2img/000030.1592514025.png) |
Both of the outputs look kind of like what I was thinking of. With the strength higher, my input becomes more vague, *and* Stable Diffusion has more steps to refine its output. But it's not really making what I want, which is a picture of cheery open fire. With the strength lower, my input is more clear, *but* Stable Diffusion has less chance to refine itself, so the result ends up inheriting all the problems of my bad drawing.
If you want to try this out yourself, all of these are using a seed of `1592514025` with a width/height of `384`, step count `10`, the default sampler (`k_lms`), and the single-word prompt `fire`:

```commandline
invoke> "fire" -s10 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png --strength 0.7
```

The code for rendering intermediates is on my (damian0815's) branch [document-img2img](https://github.com/damian0815/InvokeAI/tree/document-img2img) - run `invoke.py` and check your `outputs/img-samples/intermediates` folder while generating an image.
### Compensating for the reduced step count
After putting this guide together I was curious to see how the difference would look if I increased the step count to compensate, so that SD could still do `20` steps from my image at each strength.
Here's strength `0.4` (note step count `50`, which is `20 ÷ 0.4` to make sure SD does `20` steps from my image):
```commandline
dream> "fire" -s50 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.4
invoke> "fire" -s50 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.4
```
<div align="center" markdown>
![000035.1592514025](../assets/img2img/000035.1592514025.png)
</div>
and here is strength `0.7` (note step count `30`, which is roughly `20 ÷ 0.7` to make sure SD does `20` steps from my image):
```commandline
dream> "fire" -s30 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.7
invoke> "fire" -s30 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.7
```
<div align="center" markdown>
![000046.1592514025](../assets/img2img/000046.1592514025.png)
</div>
In both cases the image is nice and clean and "finished", but because at strength `0.7` Stable Diffusion has been given so much more freedom to improve on my badly-drawn flames, they've come out looking much better. You can really see the difference when looking at the latent steps. There's more noise on the first image with strength `0.7`:

![gravity46](../assets/img2img/000046.steps.gravity.png)

than there is for strength `0.4`:

![gravity35](../assets/img2img/000035.steps.gravity.png)

and that extra noise gives the algorithm more choices when it is evaluating how to denoise any particular pixel in the image.
Unfortunately, it seems that `img2img` is very sensitive to the step count. Here's strength `0.7` with a step count of `29` (SD did 19 steps from my image):
<div align="center" markdown>
![gravity45](../assets/img2img/000045.1592514025.png)
</div>

By comparing the latents we can sort of see that something got interpreted differently enough on the third or fourth step to lead to a rather different interpretation of the flames.

![gravity46](../assets/img2img/000046.steps.gravity.png)
![gravity45](../assets/img2img/000045.steps.gravity.png)
This is the result of a difference in the de-noising "schedule" - basically the noise has to be cleaned by a certain degree each step or the model won't "converge" on the image properly (see [stable diffusion blog](https://huggingface.co/blog/stable_diffusion) for more about that). A different step count means a different schedule, which means things get interpreted slightly differently at every step.
title: Inpainting
---
## **Creating Transparent Regions for Inpainting**
Inpainting is really cool. To do it, you start with an initial image
and use a photoeditor to make one or more regions transparent
(i.e. they have a "hole" in them). You then provide the path to this
image at the invoke> command line using the `-I` switch. Stable
Diffusion will only paint within the transparent region.
There's a catch. In the current implementation, you have to prepare
the initial image correctly so that the underlying colors are
preserved under the transparent area. Many imaging editing
applications will by default erase the color information under the
transparent pixels and replace them with white or black, which will
lead to suboptimal inpainting. It often helps to apply incomplete
transparency, such as any value between 1 and 99%.
You also must take care to export the PNG file in such a way that the
color information is preserved. There is often an option in the export
dialog that lets you specify this.
If your photoeditor is erasing the underlying color information,
`invoke.py` will give you a big fat warning. If you can't find a way to
coax your photoeditor to retain color values under transparent areas,
then you can combine the `-I` and `-M` switches to provide both the
original unedited image and the masked (partially transparent) image:
```bash
dream> "man with cat on shoulder" -I./images/man.png -M./images/man-transparent.png
invoke> "man with cat on shoulder" -I./images/man.png -M./images/man-transparent.png
```
We are hoping to get rid of the need for this workaround in an upcoming release.
## **Masking using Text**
You can also create a mask using a text prompt to select the part of
the image you want to alter, using the <a
href="https://github.com/timojl/clipseg">clipseg</a> algorithm. This
works on any image, not just ones generated by InvokeAI.
The `--text_mask` (short form `-tm`) option takes two arguments. The
first argument is a text description of the part of the image you wish
to mask (paint over). If the text description contains a space, you must
surround it with quotation marks. The optional second argument is the
minimum threshold for the mask classifier's confidence score, described
in more detail below.
To see how this works in practice, here's an image of a still life
painting that I got off the web.
<img src="../assets/still-life-scaled.jpg">
You can selectively mask out the
orange and replace it with a baseball in this way:
~~~
invoke> a baseball -I /path/to/still_life.png -tm orange
~~~
<img src="../assets/still-life-inpainted.png">
The clipseg classifier produces a confidence score for each region it
identifies. Generally regions that score above 0.5 are reliable, but
if you are getting too much or too little masking you can adjust the
threshold down (to get more mask), or up (to get less). In this
example, by passing `-tm` a higher value, we are insisting on a tighter
mask. However, if you make it too high, the orange may not be picked
up at all!
~~~
invoke> a baseball -I /path/to/breakfast.png -tm orange 0.6
~~~
The `!mask` command may be useful for debugging problems with the
text2mask feature. The syntax is `!mask /path/to/image.png -tm <text>
<threshold>`
It will generate three files:
- The image with the selected area highlighted.
- The image with the un-selected area highlighted.
- The image with the selected area converted into a black and white
image according to the threshold level.
Note that none of these images are intended to be used as the mask
passed to invoke via `-M` and may give unexpected results if you try
to use them this way. Instead, use `!mask` for testing that you are
selecting the right mask area, and then do inpainting using the
best selection term and threshold.
Here is an example of how `!mask` works:
```
invoke> !mask ./test-pictures/curly.png -tm hair 0.5
>> generating masks from ./test-pictures/curly.png
>> Initializing clipseg model for text to mask inference
Outputs:
[941.1] outputs/img-samples/000019.curly.hair.deselected.png: !mask ./test-pictures/curly.png -tm hair 0.5
[941.2] outputs/img-samples/000019.curly.hair.selected.png: !mask ./test-pictures/curly.png -tm hair 0.5
[941.3] outputs/img-samples/000019.curly.hair.masked.png: !mask ./test-pictures/curly.png -tm hair 0.5
```
**Original image "curly.png"**
<img src="../assets/outpainting/curly.png">
**000019.curly.hair.selected.png**
<img src="../assets/inpainting/000019.curly.hair.selected.png">
**000019.curly.hair.deselected.png**
<img src="../assets/inpainting/000019.curly.hair.deselected.png">
**000019.curly.hair.masked.png**
<img src="../assets/inpainting/000019.curly.hair.masked.png">
It looks like we selected the hair pretty well at the 0.5 threshold
(which is the default, so we didn't actually have to specify it), so
let's have some fun:
```
invoke> medusa with cobras -I ./test-pictures/curly.png -tm hair 0.5 -C20
>> loaded input image of size 512x512 from ./test-pictures/curly.png
...
Outputs:
[946] outputs/img-samples/000024.801380492.png: "medusa with cobras" -s 50 -S 801380492 -W 512 -H 512 -C 20.0 -I ./test-pictures/curly.png -A k_lms -f 0.75
```
<img src="../assets/inpainting/000024.801380492.png">
### Inpainting is not changing the masked region enough!
One of the things to understand about how inpainting works is that it
is equivalent to running img2img on just the masked (transparent)
area. img2img builds on top of the existing image data, and therefore
will attempt to preserve colors, shapes and textures to the best of
its ability. Unfortunately this means that if you want to make a
dramatic change in the inpainted region, for example replacing a red
wall with a blue one, the algorithm will fight you.
You have a couple of options. The first is to increase the values of
the requested steps (`-sXXX`), strength (`-f0.XX`), and/or
condition-free guidance (`-CXX.X`). If this is not working for you, a
more extreme step is to provide the `--inpaint_replace 0.X` (`-r0.X`)
option. This value ranges from 0.0 to 1.0. The higher it is the less
attention the algorithm will pay to the data underneath the masked
region. At high values this will enable you to replace colored regions
entirely, but beware that the masked region may not blend in with the
surrounding unmasked regions as well.
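Here is a hypothetical sketch of such an aggressive run (the image and mask paths are placeholders): it raises the step count, strength and guidance, and tells the sampler to mostly ignore the pixels underneath the mask:

```bash
invoke> "a bright blue wall" -I ./images/room.png -M ./images/room-mask.png -s 100 -f 0.9 -C 15 --inpaint_replace 0.8
```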
---
## Recipe for GIMP
1. Open image in GIMP.
2. Layer->Transparency->Add Alpha Channel
3. Use lasso tool to select region to mask
4. Choose Select -> Float to create a floating selection
5. Open the Layers toolbar (++ctrl+l++) and select "Floating Selection"
6. Set opacity to a value between 0% and 99%
7. Export as PNG
8. In the export dialogue, make sure the "Save colour values from
   transparent pixels" checkbox is selected.
---
## Recipe for Adobe Photoshop
1. Open image in Photoshop
<div align="center" markdown>![step1](../assets/step1.png)</div>

2. Use any of the selection tools (Marquee, Lasso, or Wand) to select the area you desire to inpaint.

<div align="center" markdown>![step2](../assets/step2.png)</div>

3. Because we'll be applying a mask over the area we want to preserve, you should now select the inverse by using the ++shift+ctrl+i++ shortcut, or right clicking and using the "Select Inverse" option.

4. You'll now create a mask by selecting the image layer, and Masking the selection. Make sure that you don't delete any of the underlying image, or your inpainting results will be dramatically impacted.

<div align="center" markdown>![step4](../assets/step4.png)</div>

5. Make sure to hide any background layers that are present. You should see the mask applied to your image layer, and the image on your canvas should display the checkered background.

<div align="center" markdown>![step5](../assets/step5.png)</div>

6. Save the image as a transparent PNG by using `File`-->`Save a Copy` from the menu bar, or by using the keyboard shortcut ++alt+ctrl+s++

<div align="center" markdown>![step6](../assets/step6.png)</div>

7. After following the inpainting instructions above (either through the CLI or the Web UI), marvel at your newfound ability to selectively invoke. Lookin' good!

<div align="center" markdown>![step7](../assets/step7.png)</div>

8. In the export dialogue, make sure the "Save colour values from transparent pixels" checkbox is selected.
title: Others
---
## **Google Colab**
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg){ align="right" }](https://colab.research.google.com/github/lstein/stable-diffusion/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)
Open and follow instructions to use an isolated environment running Dream.
Output Example:
![Colab Notebook](../assets/colab_notebook.png)
---
## **Seamless Tiling**
The seamless tiling mode causes generated images to seamlessly tile with itself. To use it, add the
`--seamless` option when starting the script which will result in all generated images to tile, or
for each `invoke>` prompt as shown here:
```python
dream> "pond garden with lotus by claude monet" --seamless -s100 -n4
invoke> "pond garden with lotus by claude monet" --seamless -s100 -n4
```
By default this will tile on both the X and Y axes. However, you can also specify specific axes to tile on with `--seamless_axes`.
Possible values are `x`, `y`, and `x,y`:
```python
invoke> "pond garden with lotus by claude monet" --seamless --seamless_axes=x -s100 -n4
```
---
## **Shortcuts: Reusing Seeds**
Since it is so common to reuse seeds while refining a prompt, there is now a shortcut as of version
1.11. Provide a `-S` (or `--seed`) switch of `-1` to use the seed of the most recent image
generated. If you produced multiple images with the `-n` switch, then you can go back further
using `-2`, `-3`, etc. up to the first image generated by the previous command. Sorry, but you can't go
back further than one command.
Here's an example of using this to do a quick refinement. It also illustrates using the new `-G`
switch to turn on upscaling and face enhancement (see previous section):
```bash
invoke> a cute child playing hopscotch -G0.5
[...]
outputs/img-samples/000039.3498014304.png: "a cute child playing hopscotch" -s50 -W512 -H512 -C7.5 -mk_lms -S3498014304
# I wonder what it will look like if I bump up the steps and set facial enhancement to full strength?
invoke> a cute child playing hopscotch -G1.0 -s100 -S -1
reusing previous seed 3498014304
[...]
outputs/img-samples/000040.3498014304.png: "a cute child playing hopscotch" -G1.0 -s100 -W512 -H512 -C7.5 -mk_lms -S3498014304
```

---
## **Weighted Prompts**
You may weight different sections of the prompt to tell the sampler to attach different levels of
priority to them, by adding `:<percent>` to the end of the section you wish to up- or downweight. For
example consider this prompt:
```bash
tabby cat:0.25 white duck:0.75 hybrid
```

This will tell the sampler to invest 25% of its effort on the tabby cat aspect of the image and 75%
on the white duck aspect (surprisingly, this example actually works). The prompt weights can use any
combination of integers and floating point numbers, and they do not need to add up to 1.
---
## **Thresholding and Perlin Noise Initialization Options**
Two new options are the thresholding (`--threshold`) and the perlin noise initialization (`--perlin`) options. Thresholding limits the range of the latent values during optimization, which helps combat oversaturation with higher CFG scale values. Perlin noise initialization starts with a percentage (a value ranging from 0 to 1) of perlin noise mixed into the initial noise. Both features allow for more variations and options in the course of generating images.
For better intuition into what these options do in practice:
![here is a graphic demonstrating them both](../assets/truncation_comparison.jpg)
In generating this graphic, perlin noise at initialization was programmatically varied going across on the diagram by values 0.0, 0.1, 0.2, 0.4, 0.5, 0.6, 0.8, 0.9, 1.0; and the threshold was varied going down from
0, 1, 2, 3, 4, 5, 10, 20, 100. The other options are fixed, so the initial prompt is as follows (no thresholding or perlin noise):
```bash
invoke> "a portrait of a beautiful young lady" -S 1950357039 -s 100 -C 20 -A k_euler_a --threshold 0 --perlin 0
```
Here's an example of another prompt used when setting the threshold to 5 and perlin noise to 0.2:
```bash
invoke> "a portrait of a beautiful young lady" -S 1950357039 -s 100 -C 20 -A k_euler_a --threshold 5 --perlin 0.2
```
!!! note

    Currently the thresholding feature is only implemented for the k-diffusion style samplers, and empirically appears to work best with `k_euler_a` and `k_dpm_2_a`. Using 0 disables thresholding. Using 0 for perlin noise disables using perlin noise for initialization. Finally, using 1 for perlin noise uses only perlin noise for initialization.
---
internet. In the following runs, it will load up the cached versions of the required files from the
`.cache` directory of the system.
```bash
(invokeai) ~/stable-diffusion$ python3 ./scripts/preload_models.py
preloading bert tokenizer...
Downloading: 100%|██████████████████████████████████| 28.0/28.0 [00:00<00:00, 49.3kB/s]
Downloading: 100%|██████████████████████████████████| 226k/226k [00:00<00:00, 2.79MB/s]
Consider this image:
<div align="center" markdown>
![curly_woman](../assets/outpainting/curly.png)
</div>
Pretty nice, but it's annoying that the top of her head is cut
off. She's also a bit off center. Let's fix that!
```bash
invoke> !fix images/curly.png --outcrop top 64 right 64
```
This is saying to apply the `outcrop` extension by extending the top
of the image by 64 pixels, and the right of the image by the same
amount. You can use any combination of the top, bottom, left and right edges, and specify any number of pixels to extend.
The result looks like this:
<div align="center" markdown>
![curly_woman_outcrop](../assets/outpainting/curly-outcrop.png)
</div>
The new image is actually slightly larger than the original (576x576,
because 64 pixels were added to the top and right sides.)
The `outpaint` extension does the same thing, but with subtle
differences. Starting with the same image, here is how we would add an
additional 64 pixels to the top of the image:
```bash
invoke> !fix images/curly.png --out_direction top 64
```
(You can abbreviate `--out_direction` as `-D`.)
The result is shown here:
<div align="center" markdown>
![curly_woman_outpaint](../assets/outpainting/curly-outpaint.png)
</div>
Although the effect is similar, there are significant differences from
outcropping:
- You can only specify one direction to extend at a time.
- The image is **not** resized. Instead, the image is shifted by the specified
  number of pixels. If you look carefully, you'll see that less of the lady's
  torso is visible in the image.
- Because the image dimensions remain the same, there's no rounding
  to multiples of 64.
- Attempting to outpaint larger areas will frequently give rise to ugly
  ghosting effects.
- For best results, try increasing the step number.
- If you don't specify a pixel value in `-D`, it will default to half
  of the whole image, which is likely not what you want.
!!! tip

    Neither `outpaint` nor `outcrop` are perfect, but we continue to tune
    and improve them. If one doesn't work, try the other. You may also
    wish to experiment with other `img2img` arguments, such as `-C`, `-f`
    and `-s`.
---
title: Postprocessing
---
# :material-image-edit: Postprocessing
## Intro
This extension provides the ability to restore faces and upscale
The default face restoration module is GFPGAN. The default upscale is
Real-ESRGAN. For an alternative face restoration module, see [CodeFormer
Support] below.
As of version 1.14, environment.yaml will install the Real-ESRGAN
package into the standard install location for python packages, and
will put GFPGAN into a subdirectory of "src" in the InvokeAI
directory. Upscaling with Real-ESRGAN should "just work" without
further intervention. Simply pass the --upscale (-U) option on the
invoke> command line, or indicate the desired scale on the popup in
the Web GUI.
**GFPGAN** requires a series of downloadable model files to
work. These are loaded when you run `scripts/preload_models.py`. If
GFPGAN is failing with an error, please run the following from the
InvokeAI directory:
```bash
python scripts/preload_models.py
```
If you do not run this script in advance, the GFPGAN module will attempt
to download the model files the first time you try to perform facial
reconstruction.
Alternatively, if you have GFPGAN installed elsewhere, or if you are
using an earlier version of this package which asked you to install
GFPGAN in a sibling directory, you may use the `--gfpgan_dir` argument
with `invoke.py` to set a custom path to your GFPGAN directory. _There
are other GFPGAN related boot arguments if you wish to customize
further._
## Usage
If you do not explicitly specify an upscaling_strength, it will default to 0.75.
### Face Restoration
`-G : <facetool_strength>`
This prompt argument controls the strength of the face restoration that is being
applied. Similar to upscaling, values between `0.5 to 0.8` are recommended.
### Example Usage
```bash
invoke> "superman dancing with a panda bear" -U 2 0.6 -G 0.4
```
This also works with img2img:
```bash
invoke> "a man wearing a pineapple hat" -I path/to/your/file.png -U 2 0.5 -G 0.6
```
### CodeFormer Support
This repo also allows you to perform face restoration using
[CodeFormer](https://github.com/sczhou/CodeFormer).
In order to setup CodeFormer to work, you need to download the models
like with GFPGAN. You can do this either by running
`preload_models.py` or by manually downloading the [model
file](https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth)
and saving it to `ldm/invoke/restoration/codeformer/weights` folder.
You can use `-ft` prompt argument to swap between CodeFormer and the
default GFPGAN. The above mentioned `-G` prompt argument will allow
you to control the strength of the restoration effect.
### Usage:
### Usage
The following command will perform face restoration with CodeFormer instead of
the default gfpgan.
`<prompt> -G 0.8 -ft codeformer`
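For instance, at the `invoke>` prompt (the prompt text here is just a placeholder):

```bash
invoke> "portrait of an old sea captain" -G 0.8 -ft codeformer
```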
### Other Options:
### Other Options
- `-cf` - cf or CodeFormer Fidelity takes values between `0` and `1`. 0 produces
high quality results but low accuracy and 1 produces lower quality results but
@ -167,16 +162,16 @@ previously-generated file. Just use the syntax `!fix path/to/file.png
2X for a file named `./outputs/img-samples/000044.2945021133.png`,
just run:
```
dream> !fix ./outputs/img-samples/000044.2945021133.png -G 0.8 -U 2
```bash
invoke> !fix ./outputs/img-samples/000044.2945021133.png -G 0.8 -U 2
```
A new file named `000044.2945021133.fixed.png` will be created in the output
directory. Note that the `!fix` command does not replace the original file,
unlike the behavior at generate time.
### Disabling:
### Disabling
If, for some reason, you do not wish to load the GFPGAN and/or ESRGAN libraries,
you can disable them on the dream.py command line with the `--no_restore` and
you can disable them on the invoke.py command line with the `--no_restore` and
`--no_upscale` options, respectively.

View File

@ -1,14 +1,14 @@
---
title: Prompting Features
title: Prompting-Features
---
# :octicons-command-palette-24: Prompting Features
# :octicons-command-palette-24: Prompting-Features
## **Reading Prompts from a File**
You can automate `dream.py` by providing a text file with the prompts you want to run, one line per
You can automate `invoke.py` by providing a text file with the prompts you want to run, one line per
prompt. The text file must be composed with a text editor (e.g. Notepad) and not a word processor.
Each line should look like what you would type at the dream> prompt:
Each line should look like what you would type at the invoke> prompt:
```bash
a beautiful sunny day in the park, children playing -n4 -C10
@ -16,41 +16,26 @@ stormy weather on a mountain top, goats grazing -s100
innovative packaging for a squid's dinner -S137038382
```
Then pass this file's name to `dream.py` when you invoke it:
Then pass this file's name to `invoke.py` when you invoke it:
```bash
(ldm) ~/stable-diffusion$ python3 scripts/dream.py --from_file "path/to/prompts.txt"
(invokeai) ~/stable-diffusion$ python3 scripts/invoke.py --from_file "path/to/prompts.txt"
```
You may read a series of prompts from standard input by providing a filename of `-`:
```bash
(ldm) ~/stable-diffusion$ echo "a beautiful day" | python3 scripts/dream.py --from_file -
(invokeai) ~/stable-diffusion$ echo "a beautiful day" | python3 scripts/invoke.py --from_file -
```
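Piping a whole file of prompts through standard input works the same way; for example (the path is a placeholder):

```bash
(invokeai) ~/stable-diffusion$ cat path/to/prompts.txt | python3 scripts/invoke.py --from_file -
```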
---
## **Weighted Prompts**
You may weight different sections of the prompt to tell the sampler to attach different levels of
priority to them, by adding `:(number)` to the end of the section you wish to up- or downweight. For
example consider this prompt:
```bash
tabby cat:0.25 white duck:0.75 hybrid
```
This will tell the sampler to invest 25% of its effort on the tabby cat aspect of the image and 75%
on the white duck aspect (surprisingly, this example actually works). The prompt weights can use any
combination of integers and floating point numbers, and they do not need to add up to 1.
---
## **Negative and Unconditioned Prompts**
Any words between a pair of square brackets will try and be ignored by Stable Diffusion's model during generation of images.
Any words between a pair of square brackets will instruct Stable
Diffusion to attempt to ban the concept from the generated image.
```bash
```text
this is a test prompt [not really] to make you understand [cool] how this works.
```
@ -62,25 +47,33 @@ original prompt:
`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`
<div align="center" markdown>
![step1](../assets/negative_prompt_walkthru/step1.png)
</div>
That image has a woman, so if we want the horse without a rider, we can influence the image not to have a woman by putting [woman] in the prompt, like this:
`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`
<div align="center" markdown>
![step2](../assets/negative_prompt_walkthru/step2.png)
</div>
That's nice - but say we also don't want the image to be quite so blue. We can add "blue" to the list of negative prompts, so it's now [woman blue]:
`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`
<div align="center" markdown>
![step3](../assets/negative_prompt_walkthru/step3.png)
</div>
Getting close - but there's no sense in having a saddle when our horse doesn't have a rider, so we'll add one more negative prompt: [woman blue saddle].
`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue saddle]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`
<div align="center" markdown>
![step4](../assets/negative_prompt_walkthru/step4.png)
</div>
!!! notes "Notes about this feature:"
@ -88,3 +81,91 @@ Getting close - but there's no sense in having a saddle when our horse doesn't h
* You can provide multiple words within the same bracket.
* You can provide multiple brackets with multiple words in different places of your prompt. That works just fine.
* To improve typical anatomy problems, you can add negative prompts like `[bad anatomy, extra legs, extra arms, extra fingers, poorly drawn hands, poorly drawn feet, disfigured, out of frame, tiling, bad art, deformed, mutated]`.
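For example, a complete command using such a negative prompt might look like this (the prompt itself is just a placeholder; the other switches follow the forms used elsewhere in this document):

```bash
invoke> "portrait of a ballet dancer [bad anatomy, extra legs, extra fingers]" -s50 -W512 -H512
```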
---
## **Prompt Blending**
You may blend together different sections of the prompt to explore the
AI's latent semantic space and generate interesting (and often
surprising!) variations. The syntax is:
```bash
blue sphere:0.25 red cube:0.75 hybrid
```
This will tell the sampler to blend 25% of the concept of a blue
sphere with 75% of the concept of a red cube. The blend weights can
use any combination of integers and floating point numbers, and they
do not need to add up to 1. Everything to the left of the `:XX` up to
the previous `:XX` is used for merging, so the overall effect is:
```bash
0.25 * "blue sphere" + 0.75 * "white duck" + hybrid
```
Because you are exploring the "mind" of the AI, the AI's way of mixing
two concepts may not match yours, leading to surprising effects. To
illustrate, here are three images generated using various combinations
of blend weights. As usual, unless you fix the seed, the prompts will give you
different results each time you run them.
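For example, to pin down a particular blend for comparison, you can fix the seed (the seed value below is arbitrary):

```bash
invoke> "blue sphere:0.25 red cube:0.75 hybrid" -S1959848472
```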
---
<div align="center" markdown>
### "blue sphere, red cube, hybrid"
</div>
This example doesn't use melding at all and represents the default way
of mixing concepts.
<div align="center" markdown>
![blue-sphere-red-cube-hyprid](../assets/prompt-blending/blue-sphere-red-cube-hybrid.png)
</div>
It's interesting to see how the AI expressed the concept of "cube" as
the four quadrants of the enclosing frame. If you look closely, there
is depth there, so the enclosing frame is actually a cube.
<div align="center" markdown>
### "blue sphere:0.25 red cube:0.75 hybrid"
![blue-sphere-25-red-cube-75](../assets/prompt-blending/blue-sphere-0.25-red-cube-0.75-hybrid.png)
</div>
Now that's interesting. We get neither a blue sphere nor a red cube,
but a red sphere embedded in a brick wall, which represents a melding
of concepts within the AI's "latent space" of semantic
representations. Where is Ludwig Wittgenstein when you need him?
<div align="center" markdown>
### "blue sphere:0.75 red cube:0.25 hybrid"
![blue-sphere-75-red-cube-25](../assets/prompt-blending/blue-sphere-0.75-red-cube-0.25-hybrid.png)
</div>
Definitely more blue-spherey. The cube is gone entirely, but it's
really cool abstract art.
<div align="center" markdown>
### "blue sphere:0.5 red cube:0.5 hybrid"
![blue-sphere-5-red-cube-5-hybrid](../assets/prompt-blending/blue-sphere-0.5-red-cube-0.5-hybrid.png)
</div>
Whoa...! I see blue and red, but no spheres or cubes. Is the word
"hybrid" summoning up the concept of some sort of scifi creature?
Let's find out.
<div align="center" markdown>
### "blue sphere:0.5 red cube:0.5"
![blue-sphere-5-red-cube-5](../assets/prompt-blending/blue-sphere-0.5-red-cube-0.5.png)
</div>
Indeed, removing the word "hybrid" produces an image that is more like
what we'd expect.
In conclusion, prompt blending is great for exploring creative space,
but can be difficult to direct. A forthcoming release of InvokeAI will
feature more deterministic prompt weighting.

View File

@ -1,8 +1,8 @@
---
title: TEXTUAL_INVERSION
title: Textual-Inversion
---
# :material-file-document-plus-outline: TEXTUAL_INVERSION
# :material-file-document: Textual Inversion
## **Personalizing Text-to-Image Generation**
@ -23,13 +23,13 @@ As the default backend is not available on Windows, if you're using that
platform, set the environment variable `PL_TORCH_DISTRIBUTED_BACKEND` to `gloo`
```bash
python3 ./main.py --base ./configs/stable-diffusion/v1-finetune.yaml \
--actual_resume ./models/ldm/stable-diffusion-v1/model.ckpt \
-t \
-n my_cat \
--gpus 0 \
--data_root D:/textual-inversion/my_cat \
--init_word 'cat'
python3 ./main.py -t \
--base ./configs/stable-diffusion/v1-finetune.yaml \
--actual_resume ./models/ldm/stable-diffusion-v1/model.ckpt \
-n my_cat \
--gpus 0 \
--data_root D:/textual-inversion/my_cat \
--init_word 'cat'
```
During the training process, files will be created in
@ -56,22 +56,23 @@ configs/stable_diffusion/v1-finetune.yaml (currently set to 4000000)
## **Run the Model**
Once the model is trained, specify the trained .pt or .bin file when starting
dream using
invoke using
```bash
python3 ./scripts/dream.py --embedding_path /path/to/embedding.pt
python3 ./scripts/invoke.py \
--embedding_path /path/to/embedding.pt
```
Then, to utilize your subject at the dream prompt
Then, to utilize your subject at the invoke prompt
```bash
dream> "a photo of *"
invoke> "a photo of *"
```
This also works with image2image
```bash
dream> "waterfall and rainbow in the style of *" --init_img=./init-images/crude_drawing.png --strength=0.5 -s100 -n4
invoke> "waterfall and rainbow in the style of *" --init_img=./init-images/crude_drawing.png --strength=0.5 -s100 -n4
```
For .pt files it's also possible to train multiple tokens (modify the
@ -80,9 +81,9 @@ LDM checkpoints using:
```bash
python3 ./scripts/merge_embeddings.py \
--manager_ckpts /path/to/first/embedding.pt \
[</path/to/second/embedding.pt>,[...]] \
--output_path /path/to/output/embedding.pt
--manager_ckpts /path/to/first/embedding.pt \
[</path/to/second/embedding.pt>,[...]] \
--output_path /path/to/output/embedding.pt
```
Credit goes to rinongal and the repository

View File

@ -25,16 +25,17 @@ variations to create the desired image of Xena, Warrior Princess.
## Step 1 -- Find a base image that you like
The prompt we will use throughout is
`lucy lawless as xena, warrior princess, character portrait, high resolution.`
The prompt we will use throughout is:
This will be indicated as `prompt` in the examples below.
`#!bash "lucy lawless as xena, warrior princess, character portrait, high resolution."`
This will be indicated as `#!bash "prompt"` in the examples below.
First we let SD create a series of images in the usual way, in this case
requesting six iterations:
```bash
dream> lucy lawless as xena, warrior princess, character portrait, high resolution -n6
invoke> lucy lawless as xena, warrior princess, character portrait, high resolution -n6
...
Outputs:
./outputs/Xena/000001.1579445059.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -S1579445059
@ -45,7 +46,10 @@ Outputs:
./outputs/Xena/000001.3357757885.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -S3357757885
```
<figure markdown>
![var1](../assets/variation_walkthru/000001.3357757885.png)
<figcaption> Seed 3357757885 looks nice </figcaption>
</figure>
---
@ -57,7 +61,7 @@ differing by a variation amount of 0.2. This number ranges from `0` to `1.0`,
with higher numbers being larger amounts of variation.
```bash
dream> "prompt" -n6 -S3357757885 -v0.2
invoke> "prompt" -n6 -S3357757885 -v0.2
...
Outputs:
./outputs/Xena/000002.784039624.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 784039624:0.2 -S3357757885
@ -77,9 +81,15 @@ used to generate it.
This gives us a series of closely-related variations, including the two shown
here.
<figure markdown>
![var2](../assets/variation_walkthru/000002.3647897225.png)
<figcaption>subseed 3647897225</figcaption>
</figure>
<figure markdown>
![var3](../assets/variation_walkthru/000002.1614299449.png)
<figcaption>subseed 1614299449</figcaption>
</figure>
I like the expression on Xena's face in the first one (subseed 3647897225), and
the armor on her shoulder in the second one (subseed 1614299449). Can we combine
@ -89,7 +99,7 @@ We combine the two variations using `-V` (`--with_variations`). Again, we must
provide the seed for the originally-chosen image in order for this to work.
```bash
dream> "prompt" -S3357757885 -V3647897225,0.1,1614299449,0.1
invoke> "prompt" -S3357757885 -V3647897225,0.1,1614299449,0.1
Outputs:
./outputs/Xena/000003.1614299449.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1 -S3357757885
```
@ -97,7 +107,10 @@ Outputs:
Here we are providing equal weights (0.1 and 0.1) for both the subseeds. The
resulting image is close, but not exactly what I wanted:
<figure markdown>
![var4](../assets/variation_walkthru/000003.1614299449.png)
<figcaption> subseed 1614299449 </figcaption>
</figure>
We could either try combining the images with different weights, or we can
generate more variations around the almost-but-not-quite image. We do the
@ -105,7 +118,7 @@ latter, using both the `-V` (combining) and `-v` (variation strength) options.
Note that we use `-n6` to generate 6 variations:
```bash
dream> "prompt" -S3357757885 -V3647897225,0.1,1614299449,0.1 -v0.05 -n6
invoke> "prompt" -S3357757885 -V3647897225,0.1,1614299449,0.1 -v0.05 -n6
Outputs:
./outputs/Xena/000004.3279757577.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,3279757577:0.05 -S3357757885
./outputs/Xena/000004.2853129515.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -V 3647897225:0.1,1614299449:0.1,2853129515:0.05 -S3357757885
@ -118,8 +131,23 @@ Outputs:
This produces six images, all slight variations on the combination of the chosen
two images. Here's the one I like best:
<figure markdown>
![var5](../assets/variation_walkthru/000004.3747154981.png)
<figcaption> subseed 3747154981 </figcaption>
</figure>
As you can see, this is a very powerful tool which, when combined with subprompt
weighting, gives you great control over the content and quality of your
generated images.
## Variations and Samplers
The sampler you choose has a strong effect on variation strength. Some
samplers, such as `k_euler_a`, are very "creative" and produce significant
amounts of image-to-image variation even when the seed is fixed and the
`-v` argument is very low. Others are more deterministic. Feel free to
experiment until you find the combination that you like.
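As a sketch of such an experiment, reusing the seed from the walkthrough above, you might compare a creative sampler against a conservative one at the same low variation strength:

```bash
# same seed and variation strength, different samplers
invoke> "prompt" -n6 -S3357757885 -v0.1 -A k_euler_a
invoke> "prompt" -n6 -S3357757885 -v0.1 -A ddim
```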
Also be aware of the [Perlin Noise](OTHER.md#thresholding-and-perlin-noise-initialization-options)
feature, which provides another way of introducing variability into your
image generation requests.

View File

@ -1,21 +1,369 @@
---
title: Barebones Web Server
title: InvokeAI Web Server
---
# :material-web: Barebones Web Server
# :material-web: InvokeAI Web Server
As of version 1.10, this distribution comes with a bare bones web server (see
screenshot). To use it, run the `dream.py` script by adding the `--web`
option.
As of version 2.0.0, this distribution comes with a full-featured web
server (see screenshot). To use it, run the `invoke.py` script by
adding the `--web` option:
```bash
(ldm) ~/stable-diffusion$ python3 scripts/dream.py --web
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py --web
```
You can then connect to the server by pointing your web browser at
http://localhost:9090, or to the network name or IP address of the server.
http://localhost:9090. To reach the server from a different machine on
your LAN, you may launch the web server with the `--host` argument and
either the IP address of the host you are running it on, or the
wildcard `0.0.0.0`. For example:
Kudos to [Tesseract Cat](https://github.com/TesseractCat) for contributing this
code, and to [dagf2101](https://github.com/dagf2101) for refining it.
```bash
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py --web --host 0.0.0.0
```
![Dream Web Server](../assets/dream_web_server.png)
# Quick guided walkthrough of the WebGUI's features
While most of the WebGUI's features are intuitive, here is a guided
walkthrough through its various components.
![Invoke Web Server - Major Components](../assets/invoke-web-server-1.png){:width="640px"}
The screenshot above shows the Text to Image tab of the WebGUI. There
are three main sections:
1. A **control panel** on the left, which contains various settings
for text to image generation. The most important part is the text
field (currently showing `strawberry sushi`) for entering the text
prompt, and the camera icon directly underneath it, which will render the
image. We'll call this the *Invoke* button from now on.
2. The **current image** section in the middle, which shows a large
format version of the image you are currently working on. A series of
buttons at the top ("image to image", "Use All", "Use Seed", etc) lets
you modify the image in various ways.
3. A **gallery** section on the right that contains a history of the
images you have generated. These images are read and written to the
directory specified at launch time in `--outdir`.
In addition to these three elements, there is a series of icons in the
upper right for changing global settings, reporting bugs, and changing
the theme.
There is also a series of icons to the left of the control panel (see
the highlighted area in the screenshot below) that selects among a set
of tabs for performing different types of operations.
<figure markdown>
![Invoke Web Server - Control Panel](../assets/invoke-web-server-2.png){:width="512px"}
</figure>
From top to bottom, these are:
1. Text to Image - generate images from text
2. Image to Image - from an uploaded starting image (drawing or photograph) generate a new one, modified by the text prompt
3. Inpainting (pending) - Interactively erase portions of a starting image and have the AI fill in the erased region from a text prompt.
4. Outpainting (pending) - Interactively add blank space to the borders of a starting image and fill in the background from a text prompt.
5. Postprocessing (pending) - Interactively postprocess generated images using a variety of filters.
The inpainting, outpainting and postprocessing tabs are currently in
development. However, limited versions of their features can already
be accessed through the Text to Image and Image to Image tabs.
## Walkthrough
The following walkthrough will exercise most (but not all) of the
WebGUI's feature set.
### Text to Image
1. Launch the WebGUI using `python scripts/invoke.py --web` and
connect to it with your browser by accessing
`http://localhost:9090`. If the browser and server are running on
different machines on your LAN, add the option `--host 0.0.0.0` to the
launch command line and connect to the machine hosting the web server
using its IP address or domain name.
2. If all goes well, the WebGUI should come up and you'll see a green
`connected` message on the upper right.
#### Basics
1. Generate an image by typing *strawberry sushi* into the large
prompt field on the upper left and then clicking on the Invoke button
(the one with the Camera icon). After a short wait, you'll see a large
image of sushi in the image panel, and a new thumbnail in the gallery
on the right.
If you need more room on the screen, you can turn the gallery off
by clicking on the **x** to the right of "Your Invocations". You can
turn it back on later by clicking the image icon that appears in the
gallery's place.
The images are written into the directory indicated by the `--outdir`
option provided at script launch time. By default, this is
`outputs/img-samples` under the InvokeAI directory.
2. Generate a bunch of strawberry sushi images by increasing the
number of requested images by adjusting the Images counter just below
the Camera button. As each is generated, it will be added to the
gallery. You can switch the active image by clicking on the gallery
thumbnails.
3. Try playing with different settings, including image width and
height, the Sampler, the Steps and the CFG scale.
Image *Width* and *Height* do what you'd expect. However, be aware that
larger images consume more VRAM and take longer to generate.
The *Sampler* controls the algorithm used to denoise the image. Some
samplers are more "creative" than others and will produce a wider
range of variations (see next section). Some samplers run faster than
others.
*Steps* controls how many noising/denoising/sampling steps the AI will
take. The higher this value, the more refined the image will be, but
the longer the image will take to generate. A typical strategy is to
generate images with a low number of steps in order to select one to
work on further, and then regenerate it using a higher number of
steps.
The *CFG Scale* controls how hard the AI tries to match the generated
image to the input prompt. You can go as high or low as you like, but
generally values greater than 20 won't improve things much, and values
lower than 5 will produce unexpected images. There are complex
interactions between *Steps*, *CFG Scale* and the *Sampler*, so
experiment to find out what works for you.
4. To regenerate a previously-generated image, select the image you
want and click *Use All*. This loads the text prompt and other
original settings into the control panel. If you then press *Invoke*
it will regenerate the image exactly. You can also selectively modify
the prompt or other settings to tweak the image.
Alternatively, you may click on *Use Seed* to load just the image's
seed, and leave other settings unchanged.
5. To regenerate a Stable Diffusion image that was generated by
another SD package, you need to know its text prompt and its
*Seed*. Copy-paste the prompt into the prompt box, unset the
*Randomize Seed* control in the control panel, and copy-paste the
desired *Seed* into its text field. When you Invoke, you will get
something similar to the original image. It will not be exact unless
you also set the correct values for the original sampler, CFG,
steps and dimensions, but it will (usually) be close.
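If you prefer to do this from the command line instead, the equivalent invocation would look something like the following, where every value shown is a placeholder to be replaced with the original image's settings:

```bash
# seed, steps, CFG, sampler and dimensions copied from the original image
invoke> "the original prompt goes here" -S3357757885 -s50 -C7.5 -A k_lms -W512 -H512
```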
#### Variations on a theme
1. Let's try generating some variations. Select your favorite sushi
image from the gallery to load it. Then select "Use All" from the list
of buttons above. This will load up all the settings used to generate
this image, including its unique seed.
Go down to the Variations section of the Control Panel and set the
button to On. Set Variation Amount to 0.2 to generate a modest
amount of variation on the image, and also set the Image counter to
`4`. Press the `invoke` button. This will generate a series of related
images. To obtain smaller variations, just lower the Variation
Amount. You may also experiment with changing the Sampler. Some
samplers generate more variability than others. *k_euler_a* is
particularly creative, while *ddim* is pretty conservative.
2. For even more variations, experiment with increasing the setting
for *Perlin*. This adds a bit of noise to the image generation
process. Note that values of Perlin noise greater than 0.15 produce
poor images for several of the samplers.
#### Facial reconstruction and upscaling
Stable Diffusion frequently produces mangled faces, particularly when
there are multiple figures in the same scene. Stable Diffusion has
particular issues with generating realistic eyes. InvokeAI provides
the ability to reconstruct faces using either the GFPGAN or CodeFormer
libraries. For more information see [POSTPROCESS](POSTPROCESS.md).
1. Invoke a prompt that generates a mangled face. A prompt that often
gives this is "portrait of a lawyer, 3/4 shot" (this is not intended
as a slur against lawyers!) Once you have an image that needs some
touching up, load it into the Image panel, and press the button with
the face icon (highlighted in the first screenshot below). A dialog
box will appear. Leave *Strength* at 0.8 and press *Restore Faces*. If
all goes well, the eyes and other aspects of the face will be improved
(see the second screenshot).
![Invoke Web Server - Original Image](../assets/invoke-web-server-3.png)
![Invoke Web Server - Retouched Image](../assets/invoke-web-server-4.png)
The facial reconstruction *Strength* field adjusts how aggressively
the face library will try to alter the face. It can be as high as 1.0,
but be aware that this often softens the face in an airbrush style, losing
some details. The default 0.8 is usually sufficient.
2. "Upscaling" is the process of increasing the size of an image while
retaining the sharpness. InvokeAI uses an external library called
"ESRGAN" to do this. To invoke upscaling, simply select an image and
press the *HD* button above it. You can select between 2X and 4X
upscaling, and adjust the upscaling strength, which has much the same
meaning as in facial reconstruction. Try running this on one of your
previously-generated images.
3. Finally, you can run facial reconstruction and/or upscaling
automatically after each Invocation. Go to the Advanced Options
section of the Control Panel and turn on *Restore Face* and/or
*Upscale*.
### Image to Image
InvokeAI lets you take an existing image and use it as the basis for a
new creation. You can use any sort of image, including a photograph, a
scanned sketch, or a digital drawing, as long as it is in PNG or JPEG
format.
For this tutorial, we'll use files named
[Lincoln-and-Parrot-512.png](../assets/Lincoln-and-Parrot-512.png),
and
[Lincoln-and-Parrot-512-transparent.png](../assets/Lincoln-and-Parrot-512-transparent.png).
Download these images to your local machine now to continue with the walkthrough.
1. Click on the *Image to Image* tab icon, which is the second icon
from the top on the left-hand side of the screen:
<figure markdown>
![Invoke Web Server - Image to Image Icon](../assets/invoke-web-server-5.png)
</figure>
This will bring you to a screen similar to the one shown here:
<figure markdown>
![Invoke Web Server - Image to Image Tab](../assets/invoke-web-server-6.png){:width="640px"}
</figure>
2. Drag-and-drop the Lincoln-and-Parrot image into the Image panel, or
click the blank area to get an upload dialog. The image will load into
an area marked *Initial Image*. (The WebGUI will also load the most
recently-generated image from the gallery into a section on the left,
but this image will be replaced in the next step.)
3. Go to the prompt box and type *old sea captain with raven on
shoulder* and press Invoke. A derived image will appear to the right
of the original one:
![Invoke Web Server - Image to Image example](../assets/invoke-web-server-7.png){:width="640px"}
4. Experiment with the different settings. The most influential one
in Image to Image is *Image to Image Strength* located about midway
down the control panel. By default it is set to 0.75, but can range
from 0.0 to 0.99. The higher the value, the more of the original image
the AI will replace. A value of 0 will leave the initial image
completely unchanged, while 0.99 will replace it completely. However,
the Sampler and CFG Scale also influence the final result. You can
also generate variations in the same way as described in Text to
Image.
5. What if we only want to change certain part(s) of the image and
leave the rest intact? This is called Inpainting, and a future version
of the InvokeAI web server will provide an interactive painting canvas
on which you can directly draw the areas you wish to Inpaint into. For
now, you can achieve this effect by using an external photo editor
to make one or more regions of the image transparent, as described in
[INPAINTING.md](INPAINTING.md), and uploading that.
The file
[Lincoln-and-Parrot-512-transparent.png](../assets/Lincoln-and-Parrot-512-transparent.png)
is a version of the earlier image in which the area around the parrot
has been replaced with transparency. Click on the "x" in the upper
right of the Initial Image and upload the transparent version. Using
the same prompt "old sea captain with raven on shoulder" try Invoking
an image. This time, only the parrot will be replaced, leaving the
rest of the original image intact:
<figure markdown>
![Invoke Web Server - Inpainting](../assets/invoke-web-server-8.png){:width="640px"}
</figure>
6. Would you like to modify a previously-generated image using the
Image to Image facility? Easy! While in the Image to Image panel,
hover over any of the gallery images to see a little menu of icons pop
up. Click the picture icon to instantly send the selected image to
Image to Image as the initial image.
You can do the same from the Text to Image tab by clicking on the
picture icon above the central image panel. The screenshot below
shows where the "use as initial image" icons are located.
![Invoke Web Server - Use as Image Links](../assets/invoke-web-server-9.png){:width="640px"}
## Parting remarks
This concludes the walkthrough, but there are several more features that you
can explore. Please check out the [Command Line Interface](CLI.md)
documentation for further explanation of the advanced features that
were not covered here.
The WebGUI is under rapid development. Check back regularly for
updates!
## Reference
### Additional Options
parameter <img width=160 align="right"> | effect
-- | --
`--web_develop` | Starts the web server in development mode.
`--web_verbose` | Enables verbose logging
`--cors [CORS ...]` | Additional allowed origins, comma-separated
`--host HOST` | Web server: Host or IP to listen on. Set to 0.0.0.0 to accept traffic from other devices on your network.
`--port PORT` | Web server: Port to listen on
`--gui` | Start InvokeAI GUI - This is the "desktop mode" version of the web app. It uses Flask to create a desktop app experience of the webserver.
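For example, a launch command combining several of these options might look like this (the host, port and origin values are placeholders):

```bash
# serve the WebGUI to the whole LAN on port 9090 and allow one extra origin
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py --web --host 0.0.0.0 --port 9090 --cors http://localhost:5173
```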
### Web Specific Features
The web interface offers an incredibly easy-to-use way of interacting with the InvokeAI toolkit.
For detailed guidance on individual features, see the Feature-specific help documents available in this directory.
Note that the latest functionality available in the CLI may not always be available in the Web interface.
#### Dark Mode & Light Mode
The InvokeAI interface is available in a nano-carbon black & purple Dark Mode, and a "burn your eyes out Nosferatu" Light Mode. These can be toggled by clicking the Sun/Moon icons at the top right of the interface.
![InvokeAI Web Server - Dark Mode](../assets/invoke_web_dark.png)
![InvokeAI Web Server - Light Mode](../assets/invoke_web_light.png)
#### Invocation Toolbar
The left side of the InvokeAI interface is available for customizing the prompt and the settings used for invoking your new image. Typing your prompt into the open text field and clicking the Invoke button will produce the image based on the settings configured in the toolbar.
See below for additional documentation related to each feature:
- [Core Prompt Settings](./CLI.md)
- [Variations](./VARIATIONS.md)
- [Upscaling](./POSTPROCESS.md#upscaling)
- [Image to Image](./IMG2IMG.md)
- [Inpainting](./INPAINTING.md)
- [Other](./OTHER.md)
#### Invocation Gallery
On load, the gallery displays all previously generated files found in the currently selected `--outdir` (or the default outputs folder). As new invocations are generated, these will be dynamically added to the gallery, and can be previewed by selecting them. Each image also has a simple set of actions (e.g., Delete, Use Seed, Use All Parameters, etc.) that can be accessed by hovering over the image.
#### Image Workspace
When an image from the Invocation Gallery is selected, or is generated, the image will be displayed within the center of the interface. A quickbar of common image interactions is displayed along the top of the image, including:
- Use image in the `Image to Image` workflow
- Initialize Face Restoration on the selected file
- Initialize Upscaling on the selected file
- View File metadata and details
- Delete the file
## Acknowledgements
A huge shout-out to the core team working to make this vision a
reality, including
[psychedelicious](https://github.com/psychedelicious),
[Kyle0654](https://github.com/Kyle0654) and
[blessedcoolant](https://github.com/blessedcoolant). [hipsterusername](https://github.com/hipsterusername)
was the team's unofficial cheerleader and added tooltips/docs.

View File

@ -1,8 +1,8 @@
---
title: SAMPLER CONVERGENCE
title: Sampler Convergence
---
## *Sampler Convergence*
# :material-palette-advanced: *Sampler Convergence*
As features keep increasing, making the right choices for your needs can become increasingly difficult. What sampler to use? And for how many steps? Do you change the CFG value? Do you use prompt weighting? Do you allow variations?
@ -14,12 +14,14 @@ In this document, we will talk about sampler convergence.
Looking for a short version? Here's a TL;DR in 3 tables.
| Remember |
|:---|
| Results converge as steps (`-s`) are increased (except for `K_DPM_2_A` and `K_EULER_A`). Often at ≥ `-s100`, but may require ≥ `-s700`). |
| Producing a batch of candidate images at low (`-s8` to `-s30`) step counts can save you hours of computation. |
| `K_HEUN` and `K_DPM_2` converge in less steps (but are slower). |
| `K_DPM_2_A` and `K_EULER_A` incorporate a lot of creativity/variability. |
!!! note "Remember"
- Results converge as steps (`-s`) are increased (except for `K_DPM_2_A` and `K_EULER_A`). Often at ≥ `-s100`, but may require ≥ `-s700`.
- Producing a batch of candidate images at low (`-s8` to `-s30`) step counts can save you hours of computation.
- `K_HEUN` and `K_DPM_2` converge in fewer steps (but are slower).
- `K_DPM_2_A` and `K_EULER_A` incorporate a lot of creativity/variability.
<div align="center" markdown>
| Sampler | (3 sample avg) it/s (M1 Max 64GB, 512x512) |
|---|---|
@ -32,10 +34,13 @@ Looking for a short version? Here's a TL;DR in 3 tables.
| `K_DPM_2_A` | 0.95 (slower) |
| `K_EULER_A` | 1.86 |
| Suggestions |
|:---|
| For most use cases, `K_LMS`, `K_HEUN` and `K_DPM_2` are the best choices (the latter 2 run 0.5x as quick, but tend to converge 2x as quick as `K_LMS`). At very low steps (≤ `-s8`), `K_HEUN` and `K_DPM_2` are not recommended. Use `K_LMS` instead.|
| For variability, use `K_EULER_A` (runs 2x as quick as `K_DPM_2_A`). |
</div>
!!! tip "suggestions"
For most use cases, `K_LMS`, `K_HEUN` and `K_DPM_2` are the best choices (the latter 2 run 0.5x as quick, but tend to converge 2x as quick as `K_LMS`). At very low steps (≤ `-s8`), `K_HEUN` and `K_DPM_2` are not recommended. Use `K_LMS` instead.
For variability, use `K_EULER_A` (runs 2x as quick as `K_DPM_2_A`).
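As a concrete sketch of that advice (the prompt and seed are placeholders), `K_HEUN` at roughly half the step count can be compared against `K_LMS`:

```bash
# K_HEUN tends to converge in about half the steps of K_LMS
invoke> "a red barn in a snowy field" -A k_lms -s50 -S472986113
invoke> "a red barn in a snowy field" -A k_heun -s25 -S472986113
```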
---
@ -60,15 +65,15 @@ This realization is very useful because it means you don't need to create a batc
You can produce the same 100 images at `-s10` to `-s30` using a K-sampler (since they converge faster), get a rough idea of the final result, choose your 2 or 3 favorite ones, and then run `-s100` on those images to polish some details.
The latter technique is 3-8x as quick.
Example:
!!! example
At 60s per 100 steps.
At 60s per 100 steps.
(Option A) 60s * 100 images = 6000s (100 images at `-s100`, manually picking 3 favorites)
A) 60s * 100 images = 6000s (100 images at `-s100`, manually picking 3 favorites)
(Option B) 6s * 100 images + 60s * 3 images = 780s (100 images at `-s10`, manually picking 3 favorites, and running those 3 at `-s100` to polish details)
B) 6s * 100 images + 60s * 3 images = 780s (100 images at `-s10`, manually picking 3 favorites, and running those 3 at `-s100` to polish details)
The result is 1 hour and 40 minutes (Option A) vs 13 minutes (Option B).
The result is __1 hour and 40 minutes__ for Option A, vs __13 minutes__ for Option B.
### *Topic convergence*
@ -110,9 +115,12 @@ Note also the point of convergence may not be the most desirable state (e.g. I p
Once we understand the concept of sampler convergence, we must look into the performance of each sampler in terms of steps (iterations) per second, as not all samplers run at the same speed.
On my M1 Max with 64GB of RAM, for a 512x512 image:
| Sampler | (3 sample average) it/s |
|---|---|
<div align="center" markdown>
On my M1 Max with 64GB of RAM, for a 512x512 image
| Sampler | (3 sample average) it/s |
| :--- | :--- |
| `DDIM` | 1.89 |
| `PLMS` | 1.86 |
| `K_EULER` | 1.86 |
@ -122,11 +130,13 @@ On my M1 Max with 64GB of RAM, for a 512x512 image:
| `K_DPM_2_A` | 0.95 (slower) |
| `K_EULER_A` | 1.86 |
</div>
Combining our results with the steps per second of each sampler, three choices come out on top: `K_LMS`, `K_HEUN` and `K_DPM_2` (where the latter two run 0.5x as quick but tend to converge 2x as quick as `K_LMS`). For creativity and a lot of variation between iterations, `K_EULER_A` can be a good choice (which runs 2x as quick as `K_DPM_2_A`).
Additionally, image generation at very low steps (≤ `-s8`) is not recommended for `K_HEUN` and `K_DPM_2`. Use `K_LMS` instead.
<img width="397" alt="192044949-67d5d441-a0d5-4d5a-be30-5dda4fc28a00-min" src="https://user-images.githubusercontent.com/50542132/192046823-2714cb29-bbf3-4eb1-9213-e27a0963905c.png">
![K-compare](https://user-images.githubusercontent.com/50542132/192046823-2714cb29-bbf3-4eb1-9213-e27a0963905c.png){ width=600}
### *Three key points*

View File

@ -1,5 +1,7 @@
---
title: F.A.Q.
hide:
- toc
---
# :material-frequently-asked-questions: F.A.Q.
@ -51,7 +53,7 @@ rm ${PIP_LOG}
### **QUESTION**
`dream.py` crashes with the complaint that it can't find `ldm.simplet2i.py`. Or it complains that
`invoke.py` crashes with the complaint that it can't find `ldm.simplet2i.py`. Or it complains that
a function is being passed incorrect parameters.
### **SOLUTION**
@ -63,7 +65,7 @@ Reinstall the stable diffusion modules. Enter the `stable-diffusion` directory a
### **QUESTION**
`dream.py` dies, complaining of various missing modules, none of which starts with `ldm``.
`invoke.py` dies, complaining of various missing modules, none of which starts with `ldm`.
### **SOLUTION**
@ -87,9 +89,7 @@ Usually this will be sufficient, but if you start to see errors about
missing or incorrect modules, use the command `pip install -e .`
and/or `conda env update` (These commands won't break anything.)
`pip install -e .` and/or
`conda env update -f environment.yaml`
`pip install -e .` and/or `conda env update -f environment.yaml`
(These commands won't break anything.)

View File

@ -1,6 +1,5 @@
---
title: Home
template: main.html
---
<!--
@ -13,7 +12,7 @@ template: main.html
-->
<div align="center" markdown>
# :material-script-text-outline: Stable Diffusion Dream Script
# ^^**InvokeAI: A Stable Diffusion Toolkit**^^ :tools: <br> <small>Formerly known as lstein/stable-diffusion</small>
![project logo](assets/logo.png)
@ -25,37 +24,41 @@ template: main.html
[![github open issues badge]][github open issues link] [![github open prs badge]][github open prs link]
[CI checks on dev badge]: https://flat.badgen.net/github/checks/lstein/stable-diffusion/development?label=CI%20status%20on%20dev&cache=900&icon=github
[CI checks on dev link]: https://github.com/lstein/stable-diffusion/actions?query=branch%3Adevelopment
[CI checks on main badge]: https://flat.badgen.net/github/checks/lstein/stable-diffusion/main?label=CI%20status%20on%20main&cache=900&icon=github
[CI checks on main link]: https://github.com/lstein/stable-diffusion/actions/workflows/test-dream-conda.yml
[discord badge]: https://flat.badgen.net/discord/members/htRgbc7e?icon=discord
[discord link]: https://discord.com/invite/htRgbc7e
[github forks badge]: https://flat.badgen.net/github/forks/lstein/stable-diffusion?icon=github
[CI checks on dev badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/development?label=CI%20status%20on%20dev&cache=900&icon=github
[CI checks on dev link]: https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
[discord link]: https://discord.gg/ZmtBAhwWhy
[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
[github forks link]: https://useful-forks.github.io/?repo=lstein%2Fstable-diffusion
[github open issues badge]: https://flat.badgen.net/github/open-issues/lstein/stable-diffusion?icon=github
[github open issues link]: https://github.com/lstein/stable-diffusion/issues?q=is%3Aissue+is%3Aopen
[github open prs badge]: https://flat.badgen.net/github/open-prs/lstein/stable-diffusion?icon=github
[github open prs link]: https://github.com/lstein/stable-diffusion/pulls?q=is%3Apr+is%3Aopen
[github stars badge]: https://flat.badgen.net/github/stars/lstein/stable-diffusion?icon=github
[github stars link]: https://github.com/lstein/stable-diffusion/stargazers
[latest commit to dev badge]: https://flat.badgen.net/github/last-commit/lstein/stable-diffusion/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900
[latest commit to dev link]: https://github.com/lstein/stable-diffusion/commits/development
[latest release badge]: https://flat.badgen.net/github/release/lstein/stable-diffusion/development?icon=github
[latest release link]: https://github.com/lstein/stable-diffusion/releases
[github open issues badge]: https://flat.badgen.net/github/open-issues/invoke-ai/InvokeAI?icon=github
[github open issues link]: https://github.com/invoke-ai/InvokeAI/issues?q=is%3Aissue+is%3Aopen
[github open prs badge]: https://flat.badgen.net/github/open-prs/invoke-ai/InvokeAI?icon=github
[github open prs link]: https://github.com/invoke-ai/InvokeAI/pulls?q=is%3Apr+is%3Aopen
[github stars badge]: https://flat.badgen.net/github/stars/invoke-ai/InvokeAI?icon=github
[github stars link]: https://github.com/invoke-ai/InvokeAI/stargazers
[latest commit to dev badge]: https://flat.badgen.net/github/last-commit/invoke-ai/InvokeAI/development?icon=github&color=yellow&label=last%20dev%20commit&cache=900
[latest commit to dev link]: https://github.com/invoke-ai/InvokeAI/commits/development
[latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
</div>
This is a fork of [CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion), the open
source text-to-image generator. It provides a streamlined process with various new features and
options to aid the image generation process. It runs on Windows, Mac and Linux machines, and runs on
GPU cards with as little as 4 GB or RAM.
<a href="https://github.com/invoke-ai/InvokeAI">InvokeAI</a> is an
implementation of Stable Diffusion, the open source text-to-image and
image-to-image generator. It provides a streamlined process with
various new features and options to aid the image generation
process. It runs on Windows, Mac and Linux machines, and runs on GPU
cards with as little as 4 GB of RAM.
**Quick links**: [<a href="https://discord.gg/NwVCmKwY">Discord Server</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
<div align="center"><img src="assets/invoke-web-server-1.png" width=640></div>
!!! note
This fork is rapidly evolving. Please use the
[Issues](https://github.com/lstein/stable-diffusion/issues) tab to report bugs and make feature
requests. Be sure to use the provided templates. They will help aid diagnose issues faster.
This fork is rapidly evolving. Please use the [Issues tab](https://github.com/invoke-ai/InvokeAI/issues) to report bugs and make feature requests. Be sure to use the provided templates. They will help us diagnose issues faster.
## :octicons-package-dependencies-24: Installation
@ -81,25 +84,46 @@ You wil need one of the following:
### :fontawesome-regular-hard-drive: Disk
- At least 6 GB of free disk space for the machine learning model, Python, and all its dependencies.
- At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.
!!! note
If you are have a Nvidia 10xx series card (e.g. the 1080ti), please run the dream script in
If you have an Nvidia 10xx series card (e.g. the 1080ti), please run the invoke script in
full-precision mode as shown below.
Similarly, specify full-precision mode on Apple M1 hardware.
To run in full-precision mode, start `dream.py` with the `--full_precision` flag:
To run in full-precision mode, start `invoke.py` with the `--full_precision` flag:
```bash
(ldm) ~/stable-diffusion$ python scripts/dream.py --full_precision
(invokeai) ~/InvokeAI$ python scripts/invoke.py --full_precision
```
## :octicons-log-16: Latest Changes
### vNEXT <small>(TODO 2022)</small>
- Deprecated `--full_precision` / `-F`. Simply omit it and `dream.py` will auto
### v2.0.0 <small>(9 October 2022)</small>
- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
for backward compatibility.
- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
- Support for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/INPAINTING.md">inpainting</a> and <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OUTPAINTING.md">outpainting</a>
- img2img runs on all k* samplers
- Support for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/PROMPTS.md#negative-and-unconditioned-prompts">negative prompts</a>
- Support for CodeFormer face reconstruction
- Support for Textual Inversion on Macintoshes
- Support in both WebGUI and CLI for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/POSTPROCESS.md">post-processing of previously-generated images</a>
using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
and "embiggen" upscaling. See the `!fix` command.
- New `--hires` option on `invoke>` line allows <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/CLI.md#this-is-an-example-of-txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
- New `--perlin` and `--threshold` options allow you to add and control variation
during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>
- Extensive metadata now written into PNG files, allowing reliable regeneration of images
and tweaking of previous settings.
- Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
- Improved <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/CLI.md">command-line completion behavior</a>.
New commands added:
* List command-line history with `!history`
* Search command-line history with `!search`
* Clear history with `!clear`
- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
configure. To switch away from auto use the new flag like `--precision=float32`.
### v1.14 <small>(11 September 2022)</small>
@ -124,7 +148,7 @@ You wil need one of the following:
[Kevin Gibbons](https://github.com/bakkot)
- A new configuration file scheme that allows new models (including upcoming stable-diffusion-v1.5)
to be added without altering the code. ([David Wager](https://github.com/maddavid12))
- Can specify --grid on dream.py command line as the default.
- Can specify --grid on invoke.py command line as the default.
- Miscellaneous internal bug and stability fixes.
- Works on M1 Apple hardware.
- Multiple bug fixes.

View File

@ -1,4 +1,10 @@
# Before you begin
---
title: Docker
---
# :fontawesome-brands-docker: Docker
## Before you begin
- For end users: Install Stable Diffusion locally using the instructions for
your OS.
@ -6,7 +12,7 @@
deployment to other environments (on-premises or cloud), follow these
instructions. For general use, install locally to leverage your machine's GPU.
# Why containers?
## Why containers?
They provide a flexible, reliable way to build and deploy Stable Diffusion.
You'll also use a Docker volume to store the largest model files and image
@ -26,11 +32,11 @@ development purposes it's fine. Once you're done with development tasks on your
laptop you can build for the target platform and architecture and deploy to
another environment with NVIDIA GPUs on-premises or in the cloud.
# Installation on a Linux container
## Installation on a Linux container
## Prerequisites
### Prerequisites
### Get the data files
#### Get the data files
Go to
[Hugging Face](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original),
@ -44,14 +50,14 @@ cd ~/Downloads
wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth
```
### Install [Docker](https://github.com/santisbon/guides#docker)
#### Install [Docker](https://github.com/santisbon/guides#docker)
On the Docker Desktop app, go to Preferences, Resources, Advanced. Increase the
CPUs and Memory to avoid this
[Issue](https://github.com/invoke-ai/InvokeAI/issues/342). You may need to
increase Swap and Disk image size too.
## Setup
### Setup
Set the fork you want to use and other variables.
@ -132,11 +138,11 @@ docker run -it \
$TAG_STABLE_DIFFUSION
```
# Usage (time to have fun)
## Usage (time to have fun)
## Startup
### Startup
If you're on a **Linux container** the `dream` script is **automatically
If you're on a **Linux container** the `invoke` script is **automatically
started** and the output dir set to the Docker volume you created earlier.
If you're **directly on macOS follow these startup instructions**.
@ -148,17 +154,17 @@ half-precision requires autocast and won't work.
By default the images are saved in `outputs/img-samples/`.
```Shell
python3 scripts/dream.py --full_precision
python3 scripts/invoke.py --full_precision
```
You'll get the script's prompt. You can see available options or quit.
```Shell
dream> -h
dream> q
invoke> -h
invoke> q
```
## Text to Image
### Text to Image
For quick (but bad) image results, test with 5 steps (default 50) and 1 sample
image. This will let you know that everything is set up correctly.
@ -166,10 +172,10 @@ Then increase steps to 100 or more for good (but slower) results.
The prompt can be in quotes or not.
```Shell
dream> The hulk fighting with sheldon cooper -s5 -n1
dream> "woman closeup highly detailed" -s 150
invoke> The hulk fighting with sheldon cooper -s5 -n1
invoke> "woman closeup highly detailed" -s 150
# Reuse previous seed and apply face restoration
dream> "woman closeup highly detailed" --steps 150 --seed -1 -G 0.75
invoke> "woman closeup highly detailed" --steps 150 --seed -1 -G 0.75
```
You'll need to experiment to see if face restoration is making it better or
@ -188,7 +194,7 @@ volume):
docker cp dummy:/data/000001.928403745.png /Users/<your-user>/Pictures
```
## Image to Image
### Image to Image
You can also do text-guided image-to-image translation. For example, turning a
sketch into a detailed drawing.
@ -210,35 +216,35 @@ If you're on a Docker container, copy your input image into the Docker volume
docker cp /Users/<your-user>/Pictures/sketch-mountains-input.jpg dummy:/data/
```
Try it out generating an image (or more). The `dream` script needs absolute
Try it out by generating an image (or more). The `invoke` script needs absolute
paths to find the image so don't use `~`.
If you're on your Mac
```Shell
dream> "A fantasy landscape, trending on artstation" -I /Users/<your-user>/Pictures/sketch-mountains-input.jpg --strength 0.75 --steps 100 -n4
invoke> "A fantasy landscape, trending on artstation" -I /Users/<your-user>/Pictures/sketch-mountains-input.jpg --strength 0.75 --steps 100 -n4
```
If you're on a Linux container on your Mac
```Shell
dream> "A fantasy landscape, trending on artstation" -I /data/sketch-mountains-input.jpg --strength 0.75 --steps 50 -n1
invoke> "A fantasy landscape, trending on artstation" -I /data/sketch-mountains-input.jpg --strength 0.75 --steps 50 -n1
```
## Web Interface
### Web Interface
You can use the `dream` script with a graphical web interface. Start the web
You can use the `invoke` script with a graphical web interface. Start the web
server with:
```Shell
python3 scripts/dream.py --full_precision --web
python3 scripts/invoke.py --full_precision --web
```
If it's running on your Mac point your Mac web browser to http://127.0.0.1:9090
Press Control-C at the command line to stop the web server.
## Notes
### Notes
Some text you can add at the end of the prompt to make it very pretty:

View File

@ -26,38 +26,36 @@ title: Linux
3. Copy the InvokeAI source code from GitHub:
```
(base) ~$ git clone https://github.com/invoke-ai/InvokeAI.git
```
```bash
(base) ~$ git clone https://github.com/invoke-ai/InvokeAI.git
```
This will create an InvokeAI folder where you will follow the rest of the steps.
This will create an InvokeAI folder where you will follow the rest of the steps.
4. Enter the newly-created InvokeAI folder. From this step forward make sure that you are working in the InvokeAI directory!
```
(base) ~$ cd InvokeAI
(base) ~/InvokeAI$
```
```bash
(base) ~$ cd InvokeAI
(base) ~/InvokeAI$
```
5. Use anaconda to copy necessary python packages, create a new python
environment named `ldm` and activate the environment.
environment named `invokeai` and activate the environment.
```bash
(base) ~/InvokeAI$ conda env create
(base) ~/InvokeAI$ conda activate invokeai
(invokeai) ~/InvokeAI$
```
```
(base) ~/InvokeAI$ conda env create
(base) ~/InvokeAI$ conda activate ldm
(ldm) ~/InvokeAI$
```
After these steps, your command prompt will be prefixed by `(ldm)` as shown
After these steps, your command prompt will be prefixed by `(invokeai)` as shown
above.
6. Load a couple of small machine-learning models required by stable diffusion:
```
(ldm) ~/InvokeAI$ python3 scripts/preload_models.py
```
```bash
(invokeai) ~/InvokeAI$ python3 scripts/preload_models.py
```
!!! note
@ -69,7 +67,7 @@ This will create InvokeAI folder where you will follow the rest of the steps.
- For running with the released weights, you will first need to set up an account
with [Hugging Face](https://huggingface.co).
- Use your credentials to log in, and then point your browser [here](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original.)
- Use your credentials to log in, and then point your browser [here](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original).
- You may be asked to sign a license agreement at this point.
- Click on "Files and versions" near the top of the page, and then click on the
file named "sd-v1-4.ckpt". You'll be taken to a page that prompts you to click
@ -79,34 +77,33 @@ This will create InvokeAI folder where you will follow the rest of the steps.
This will create a symbolic link from the stable-diffusion model.ckpt file, to
the true location of the `sd-v1-4.ckpt` file.
```
(ldm) ~/InvokeAI$ mkdir -p models/ldm/stable-diffusion-v1
(ldm) ~/InvokeAI$ ln -sf /path/to/sd-v1-4.ckpt models/ldm/stable-diffusion-v1/model.ckpt
```
```bash
(invokeai) ~/InvokeAI$ mkdir -p models/ldm/stable-diffusion-v1
(invokeai) ~/InvokeAI$ ln -sf /path/to/sd-v1-4.ckpt models/ldm/stable-diffusion-v1/model.ckpt
```
8. Start generating images!
```
# for the pre-release weights use the -l or --liaon400m switch
(ldm) ~/InvokeAI$ python3 scripts/dream.py -l
```bash
# for the pre-release weights use the -l or --laion400m switch
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py -l
# for the post-release weights do not use the switch
(ldm) ~/InvokeAI$ python3 scripts/dream.py
# for the post-release weights do not use the switch
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py
# for additional configuration switches and arguments, use -h or --help
(ldm) ~/InvokeAI$ python3 scripts/dream.py -h
```
# for additional configuration switches and arguments, use -h or --help
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py -h
```
9. Subsequently, to relaunch the script, be sure to run "conda activate ldm" (step 5, second command), enter the `InvokeAI` directory, and then launch the dream script (step 8). If you forget to activate the ldm environment, the script will fail with multiple `ModuleNotFound` errors.
9. Subsequently, to relaunch the script, be sure to run `conda activate invokeai` (step 5, second command), enter the `InvokeAI` directory, and then launch the `invoke` script (step 8). If you forget to activate the `invokeai` environment, the script will fail with multiple `ModuleNotFound` errors.
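For example, a typical relaunch session (assuming you cloned InvokeAI into your home directory) looks like this:
```bash
(base) ~$ conda activate invokeai
(invokeai) ~$ cd InvokeAI
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py
```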
## Updating to newer versions of the script
This distribution is changing rapidly. If you used the `git clone` method (step 3) to download the InvokeAI directory, then to update to the latest and greatest version, launch the Anaconda window, enter `InvokeAI` and type:
```
(ldm) ~/InvokeAI$ git pull
```bash
(invokeai) ~/InvokeAI$ git pull
(invokeai) ~/InvokeAI$ conda env update -f environment.yml
```
This will bring your local copy into sync with the remote one.

View File

@ -4,66 +4,63 @@ title: macOS
# :fontawesome-brands-apple: macOS
Invoke AI runs quite well on M1 Macs and we have a number of M1 users
in the community.
While the repo does run on Intel Macs, we only have a couple of
reports. If you have an Intel Mac and run into issues, please create
an issue on GitHub and we will do our best to help.
## Requirements
- macOS 12.3 Monterey or later
- Python
- Patience
- Apple Silicon or Intel Mac
- About 10GB of storage (and 10GB of data if your internet connection has data caps)
- Any M1 Mac, or an Intel Mac with 4GB+ of VRAM (ideally more)
Things have moved really fast, so these instructions change often and can
become outdated quickly. One of the problems is that there are so many
different ways to run this.
## Installation
We are trying to build a testing setup so that when we make changes it doesn't
always break.
First you need to download a large checkpoint file.
## How to
1. Sign up at https://huggingface.co
2. Go to the [Stable diffusion diffusion model page](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original)
3. Accept the terms and click Access Repository
4. Download [sd-v1-4.ckpt (4.27 GB)](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/blob/main/sd-v1-4.ckpt) and note where you have saved it (probably the Downloads folder). You may want to move it somewhere else for longer term storage - SD needs this file to run.
(this hasn't been 100% tested yet)
First get the weights checkpoint download started since it's big and will take
some time:
1. Sign up at [huggingface.co](https://huggingface.co)
2. Go to the
[Stable diffusion diffusion model page](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original)
3. Accept the terms and click Access Repository:
4. Download
[sd-v1-4.ckpt (4.27 GB)](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/blob/main/sd-v1-4.ckpt)
and note where you have saved it (probably the Downloads folder)
While that is downloading, open a Terminal and run the following commands:
While that is downloading, open Terminal and run the following commands one at a time, reading the comments and taking care to run the appropriate command for your Mac's architecture (Intel or M1).
!!! todo "Homebrew"
=== "no brew installation yet"
If you have no brew installation yet (otherwise skip):
```bash title="install brew (and Xcode command line tools)"
/bin/bash -c \
"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
```
=== "brew is already installed"
Only do this if you installed protobuf following a previous version of this tutorial; otherwise skip it:
`#!bash brew uninstall protobuf`
```bash title="install brew (and Xcode command line tools)"
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
```
!!! todo "Conda Installation"
Now there are two different ways to set up the Python (miniconda) environment:
1. Standalone
2. with pyenv
If you don't know what we are talking about, choose Standalone
1. Standalone
2. with pyenv
If you don't know what we are talking about, choose Standalone. If you are familiar with Python environments, choose "with pyenv".
=== "Standalone"
```bash
# install cmake and rust:
brew install cmake rust
```bash title="Install cmake, protobuf, and rust"
brew install cmake protobuf rust
```
Then clone the InvokeAI repository:
```bash title="Clone the InvokeAI repository:
# Clone the Invoke AI repo
git clone https://github.com/invoke-ai/InvokeAI.git
cd InvokeAI
```
Choose the appropriate architecture for your system and install miniconda:
=== "M1 arm64"
```bash title="Install miniconda for M1 arm64"
@ -82,80 +79,82 @@ While that is downloading, open a Terminal and run the following commands:
=== "with pyenv"
```{.bash .annotate}
brew install rust pyenv-virtualenv # (1)!
```bash
brew install pyenv-virtualenv
pyenv install anaconda3-2022.05
pyenv virtualenv anaconda3-2022.05
eval "$(pyenv init -)"
pyenv activate anaconda3-2022.05
```
1. You might already have this installed, if that is the case just continue.
```{.bash .annotate title="local repo setup"}
# clone the repo
git clone https://github.com/invoke-ai/InvokeAI.git
!!! todo "Clone the Invoke AI repo"
cd InvokeAI
```bash
git clone https://github.com/invoke-ai/InvokeAI.git
cd InvokeAI
```
# wait until the checkpoint file has downloaded, then proceed
!!! todo "Wait until the checkpoint-file download finished, then proceed"
# create symlink to checkpoint
mkdir -p models/ldm/stable-diffusion-v1/
We will leave the big checkpoint wherever you stashed it for long-term storage,
and make a link to it from the repo's folder. This lets you reuse it for
other repos, and if you ever need to delete Invoke AI, you won't have to download it again.
PATH_TO_CKPT="$HOME/Downloads" # (1)!
```{.bash .annotate}
# Make the directory in the repo for the symlink
mkdir -p models/ldm/stable-diffusion-v1/
ln -s "$PATH_TO_CKPT/sd-v1-4.ckpt" \
models/ldm/stable-diffusion-v1/model.ckpt
```
# This is the folder where you put the checkpoint file `sd-v1-4.ckpt`
PATH_TO_CKPT="$HOME/Downloads" # (1)!
1. or wherever you saved sd-v1-4.ckpt
# Create a link to the checkpoint
ln -s "$PATH_TO_CKPT/sd-v1-4.ckpt" models/ldm/stable-diffusion-v1/model.ckpt
```
!!! todo "create Conda Environment"
1. Replace `$HOME/Downloads` with the location where you actually stored the checkpoint file (`sd-v1-4.ckpt`)
=== "M1 arm64"
!!! todo "Create the environment & install packages"
=== "M1 Mac"
```bash
PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 \
conda env create \
-f environment-mac.yml \
&& conda activate ldm
PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac.yml
```
=== "Intel x86_64"
=== "Intel x86_64 Mac"
```bash
PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-64 \
conda env create \
-f environment-mac.yml \
&& conda activate ldm
PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-64 conda env create -f environment-mac.yml
```
```{.bash .annotate title="preload models and run script"}
# only need to do this once
python scripts/preload_models.py
```bash
# Activate the environment (you need to do this every time you want to run SD)
conda activate invokeai
# now you can run SD in CLI mode
python scripts/dream.py --full_precision # (1)!
# This will download some bits and pieces and may take a while
(invokeai) python scripts/preload_models.py
# or run the web interface!
python scripts/dream.py --web
# Run SD!
(invokeai) python scripts/invoke.py
# The original scripts should work as well.
python scripts/orig_scripts/txt2img.py \
--prompt "a photograph of an astronaut riding a horse" \
--plms
```
# or run the web interface!
(invokeai) python scripts/invoke.py --web
Note, `export PIP_EXISTS_ACTION=w` is a precaution to fix `conda env
create -f environment-mac.yml` never finishing in some situations. So
it isn't required but wont hurt.
# The original scripts should work as well.
(invokeai) python scripts/orig_scripts/txt2img.py \
--prompt "a photograph of an astronaut riding a horse" \
--plms
```
!!! info
`export PIP_EXISTS_ACTION=w` is a precaution to fix `conda env
create -f environment-mac.yml` never finishing in some situations. So
it isn't required, but it won't hurt.
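In practice that just means setting the variable in your shell before creating the environment, for example (keep the `CONDA_SUBDIR=...` prefix appropriate to your architecture, as shown above):
```bash
# Optional workaround: tell pip to wipe (w) anything that already exists
# instead of stalling, then create the environment as usual
export PIP_EXISTS_ACTION=w
conda env create -f environment-mac.yml
```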
---
## Common problems
After you followed all the instructions and try to run dream.py, you might
After you've followed all the instructions and tried to run invoke.py, you might
get several errors. Here are the errors I've seen and found solutions for.
### Is it slow?
@ -172,13 +171,13 @@ python ./scripts/orig_scripts/txt2img.py \
### Doesn't work anymore?
PyTorch nightly includes support for MPS. Because of this, this setup is
inherently unstable. One morning I woke up and it no longer worked no matter
what I did until I switched to miniforge. However, I have another Mac that works
just fine with Anaconda. If you can't get it to work, please search a little
first because many of the errors will get posted and solved. If you can't find a
solution please
[create an issue](https://github.com/invoke-ai/InvokeAI/issues).
PyTorch nightly includes support for MPS. Because of this, this setup
is inherently unstable. One morning I woke up and it no longer worked
no matter what I did until I switched to miniforge. However, I have
another Mac that works just fine with Anaconda. If you can't get it to
work, please search a little first because many of the errors will get
posted and solved. If you can't find a solution please [create an
issue](https://github.com/invoke-ai/InvokeAI/issues).
One debugging step is to update to the latest version of PyTorch nightly.
@ -187,10 +186,9 @@ conda install \
pytorch \
torchvision \
-c pytorch-nightly \
-n ldm
-n invokeai
```
If it takes forever to run `conda env create -f environment-mac.yml`, try this:
```bash
@ -202,27 +200,27 @@ conda clean \
Or you could try to completely reset Anaconda:
```bash
conda update \
--force-reinstall \
-y \
-n base \
-c defaults conda
```bash
conda update \
--force-reinstall \
-y \
-n base \
-c defaults conda
```
---
### "No module named cv2", torch, 'ldm', 'transformers', 'taming', etc
### "No module named cv2", torch, 'invokeai', 'transformers', 'taming', etc
There are several causes of these errors:
1. Did you remember to `conda activate ldm`? If your terminal prompt begins with
"(ldm)" then you activated it. If it begins with "(base)" or something else
1. Did you remember to `conda activate invokeai`? If your terminal prompt begins with
"(invokeai)" then you activated it. If it begins with "(base)" or something else
you haven't.
2. You might've run `./scripts/preload_models.py` or `./scripts/dream.py`
2. You might've run `./scripts/preload_models.py` or `./scripts/invoke.py`
instead of `python ./scripts/preload_models.py` or
`python ./scripts/dream.py`. The cause of this error is long so it's below.
`python ./scripts/invoke.py`. The explanation of this error is long, so it's covered below.
<!-- I could not find out where the error is, otherwise would have marked it as a footnote -->
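In other words, prefer the second form below; the first presumably resolves a different Python through the script's shebang line rather than the one in your active conda environment:
```bash
# May fail with ModuleNotFound errors: uses whichever Python the shebang finds
./scripts/preload_models.py

# Works: explicitly runs the Python from the active invokeai environment
python ./scripts/preload_models.py
```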
@ -231,17 +229,17 @@ There are several causes of these errors:
```bash
conda deactivate
conda env remove -n ldm
conda env remove -n invokeai
conda env create -f environment-mac.yml
```
4. If you have activated the ldm virtual environment and tried rebuilding it,
4. If you have activated the invokeai virtual environment and tried rebuilding it,
the problem could be that I have something installed that you don't and
you'll just need to install it manually. Make sure you activate the virtual
environment so it installs there instead of globally.
```bash
conda activate ldm
conda activate invokeai
pip install <package name>
```
@ -299,12 +297,12 @@ should actually be the _same python_, which you can verify by comparing the
output of `python3 -V` and `python -V`.
```bash
(ldm) % which python
/Users/name/miniforge3/envs/ldm/bin/python
(invokeai) % which python
/Users/name/miniforge3/envs/invokeai/bin/python
```
The above is what you'll see if you have miniforge and correctly activated the
ldm environment, while usingd the standalone setup instructions above.
invokeai environment, while using the standalone setup instructions above.
If you otherwise installed via pyenv, you will get this result:
@ -378,7 +376,7 @@ python scripts/preload_models.py
WARNING: this will be slower than running natively on MPS.
```
This fork already includes a fix for this in
The InvokeAI version includes this fix in
[environment-mac.yml](https://github.com/invoke-ai/InvokeAI/blob/main/environment-mac.yml).
### "Could not build wheels for tokenizers"
@ -463,13 +461,10 @@ C.
You don't have a virus. It's part of the project. Here's
[Rick](https://github.com/invoke-ai/InvokeAI/blob/main/assets/rick.jpeg)
and here's
[the code](https://github.com/invoke-ai/InvokeAI/blob/69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc/scripts/txt2img.py#L79)
that swaps him in. It's a NSFW filter, which IMO, doesn't work very good (and we
call this "computer vision", sheesh).
Actually, this could be happening because there's not enough RAM. You could try
the `model.half()` suggestion or specify smaller output images.
and here's [the
code](https://github.com/invoke-ai/InvokeAI/blob/69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc/scripts/txt2img.py#L79)
that swaps him in. It's an NSFW filter which, IMO, doesn't work very
well (and we call this "computer vision", sheesh).
---
@ -487,16 +482,14 @@ this issue too. I should probably test it.
### "view size is not compatible with input tensor's size and stride"
```bash
File "/opt/anaconda3/envs/ldm/lib/python3.10/site-packages/torch/nn/functional.py", line 2511, in layer_norm
File "/opt/anaconda3/envs/invokeai/lib/python3.10/site-packages/torch/nn/functional.py", line 2511, in layer_norm
return torch.layer_norm(input, normalized_shape, weight, bias, eps, torch.backends.cudnn.enabled)
RuntimeError: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead.
```
Update to the latest version of invoke-ai/InvokeAI. We were patching
pytorch but we found a file in stable-diffusion that we could change instead.
This is a 32-bit vs 16-bit problem.
---
Update to the latest version of invoke-ai/InvokeAI. We were
patching pytorch but we found a file in stable-diffusion that we could
change instead. This is a 32-bit vs 16-bit problem.
### The processor must support the Intel bla bla bla
@ -519,13 +512,13 @@ use ARM packages, and use `nomkl` as described above.
May appear when just starting to generate, e.g.:
```bash
dream> clouds
invoke> clouds
Generating: 0%| | 0/1 [00:00<?, ?it/s]/Users/[...]/dev/stable-diffusion/ldm/modules/embedding_manager.py:152: UserWarning: The operator 'aten::nonzero' is not currently supported on the MPS backend and will fall back to run on the CPU. This may have performance implications. (Triggered internally at /Users/runner/work/_temp/anaconda/conda-bld/pytorch_1662016319283/work/aten/src/ATen/mps/MPSFallback.mm:11.)
placeholder_idx = torch.where(
loc("mps_add"("(mpsFileLoc): /AppleInternal/Library/BuildRoots/20d6c351-ee94-11ec-bcaf-7247572f23b4/Library/Caches/com.apple.xbs/Sources/MetalPerformanceShadersGraph/mpsgraph/MetalPerformanceShadersGraph/Core/Files/MPSGraphUtilities.mm":219:0)): error: input types 'tensor<2x1280xf32>' and 'tensor<*xf16>' are not broadcast compatible
LLVM ERROR: Failed to infer result type(s).
Abort trap: 6
/Users/[...]/opt/anaconda3/envs/ldm/lib/python3.9/multiprocessing/resource_tracker.py:216: UserWarning: resource_tracker: There appear to be 1 leaked semaphore objects to clean up at shutdown
/Users/[...]/opt/anaconda3/envs/invokeai/lib/python3.9/multiprocessing/resource_tracker.py:216: UserWarning: resource_tracker: There appear to be 1 leaked semaphore objects to clean up at shutdown
warnings.warn('resource_tracker: There appear to be %d '
```

View File

@ -39,7 +39,7 @@ in the wiki
4. Run the command:
```bash
```batch
git clone https://github.com/invoke-ai/InvokeAI.git
```
@ -48,17 +48,21 @@ in the wiki
5. Enter the newly-created InvokeAI folder. From this step forward make sure that you are working in the InvokeAI directory!
```
cd InvokeAI
```
```batch
cd InvokeAI
```
6. Run the following two commands:
```
conda env create (step 6a)
conda activate ldm (step 6b)
```
This will install all python requirements and activate the "ldm" environment
```batch title="step 6a"
conda env create
```
```batch title="step 6b"
conda activate invokeai
```
This will install all python requirements and activate the "invokeai" environment
which sets PATH and other environment variables properly.
Note that the long form of the first command is `conda env create -f environment.yml`. If the
@ -67,7 +71,7 @@ conda activate ldm (step 6b)
7. Run the command:
```bash
```batch
python scripts\preload_models.py
```
@ -79,45 +83,44 @@ conda activate ldm (step 6b)
8. Now you need to install the weights for the big stable diffusion model.
- For running with the released weights, you will first need to set up an acount with Hugging Face (https://huggingface.co).
- Use your credentials to log in, and then point your browser at https://huggingface.co/CompVis/stable-diffusion-v-1-4-original.
- You may be asked to sign a license agreement at this point.
- Click on "Files and versions" near the top of the page, and then click on the file named `sd-v1-4.ckpt`. You'll be taken to a page that
prompts you to click the "download" link. Now save the file somewhere safe on your local machine.
- The weight file is >4 GB in size, so
downloading may take a while.
1. For running with the released weights, you will first need to set up an account with Hugging Face (https://huggingface.co).
2. Use your credentials to log in, and then point your browser at https://huggingface.co/CompVis/stable-diffusion-v-1-4-original.
3. You may be asked to sign a license agreement at this point.
4. Click on "Files and versions" near the top of the page, and then click on the file named `sd-v1-4.ckpt`. You'll be taken to a page that
prompts you to click the "download" link. Now save the file somewhere safe on your local machine.
5. The weight file is >4 GB in size, so
downloading may take a while.
Now run the following commands from **within the InvokeAI directory** to copy the weights file to the right place:
Now run the following commands from **within the InvokeAI directory** to copy the weights file to the right place:
```
mkdir -p models\ldm\stable-diffusion-v1
copy C:\path\to\sd-v1-4.ckpt models\ldm\stable-diffusion-v1\model.ckpt
```
```batch
mkdir -p models\ldm\stable-diffusion-v1
copy C:\path\to\sd-v1-4.ckpt models\ldm\stable-diffusion-v1\model.ckpt
```
Please replace `C:\path\to\sd-v1.4.ckpt` with the correct path to wherever you stashed this file. If you prefer not to copy or move the .ckpt file,
you may instead create a shortcut to it from within `models\ldm\stable-diffusion-v1\`.
Please replace `C:\path\to\sd-v1-4.ckpt` with the correct path to wherever you stashed this file. If you prefer not to copy or move the .ckpt file,
you may instead create a shortcut to it from within `models\ldm\stable-diffusion-v1\`.
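If you would rather link than copy, one possible approach (not part of the original instructions, and it requires an elevated Command Prompt or Windows Developer Mode) is an NTFS symbolic link created with `mklink`:
```batch
REM Hypothetical alternative to copying: link model.ckpt to the downloaded file
mklink models\ldm\stable-diffusion-v1\model.ckpt C:\path\to\sd-v1-4.ckpt
```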
9. Start generating images!
```bash
# for the pre-release weights
python scripts\dream.py -l
# for the post-release weights
python scripts\dream.py
```batch title="for the pre-release weights"
python scripts\invoke.py -l
```
10. Subsequently, to relaunch the script, first activate the Anaconda command window (step 3),enter the InvokeAI directory (step 5, `cd \path\to\InvokeAI`), run `conda activate ldm` (step 6b), and then launch the dream script (step 9).
```batch title="for the post-release weights"
python scripts\invoke.py
```
10. Subsequently, to relaunch the script, first activate the Anaconda command window (step 3), enter the InvokeAI directory (step 5, `cd \path\to\InvokeAI`), run `conda activate invokeai` (step 6b), and then launch the `invoke` script (step 9).
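For example, a typical relaunch session from the Anaconda prompt might look like this (replace the path with wherever you cloned the repository):
```batch
cd \path\to\InvokeAI
conda activate invokeai
python scripts\invoke.py
```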
!!! tip "Tildebyte has written an alternative"
**Note:** Tildebyte has written an alternative
["Easy peasy Windows install"](https://github.com/invoke-ai/InvokeAI/wiki/Easy-peasy-Windows-install)
which uses the Windows PowerShell and pew. If you are having trouble with
Anaconda on Windows, give this a try (or try it first!)
---
This distribution is changing rapidly. If you used the `git clone` method (step 4) to download the InvokeAI directory, then to update to the latest and greatest version, launch the Anaconda window, enter `InvokeAI`, and type:
This distribution is changing rapidly. If you used the `git clone` method
(step 5) to download the stable-diffusion directory, then to update to the
latest and greatest version, launch the Anaconda window, enter

View File

@ -58,6 +58,7 @@ We thank them for all of their time and hard work.
- [rabidcopy](https://github.com/rabidcopy)
- [Dominic Letz](https://github.com/dominicletz)
- [Dmitry T.](https://github.com/ArDiouscuros)
- [Kent Keirsey](https://github.com/hipsterusername)
## **Original CompVis Authors:**

View File

@ -1,14 +1,14 @@
name: ldm
name: invokeai
channels:
- pytorch
- conda-forge
dependencies:
- python==3.10.5
- python==3.9.13
- pip==22.2.2
# pytorch left unpinned
- pytorch
- torchvision
- pytorch==1.12.1
- torchvision==0.13.1
# I suggest keeping the other deps sorted for convenience.
# To determine what the latest versions should be, run:
@ -27,13 +27,12 @@ dependencies:
- imgaug==0.4.0
- kornia==0.6.7
- mpmath==1.2.1
- nomkl
- nomkl=1.0
- numpy==1.23.2
- omegaconf==2.1.1
- openh264==2.3.0
- onnx==1.12.0
- onnxruntime==1.12.1
- protobuf==3.19.4
- pudb==2022.1
- pytorch-lightning==1.7.5
- scipy==1.9.1
@ -42,22 +41,23 @@ dependencies:
- tensorboard==2.10.0
- torchmetrics==0.9.3
- pip:
- flask==2.1.3
- flask_socketio==5.3.0
- flask_cors==3.0.10
- dependency_injector==4.40.0
- eventlet
- opencv-python==4.6.0
- protobuf==3.20.1
- realesrgan==0.2.5.0
- send2trash==1.8.0
- test-tube==0.7.5
- transformers==4.21.2
- torch-fidelity==0.3.0
- -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
- -e git+https://github.com/openai/CLIP.git@main#egg=clip
- -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
- -e git+https://github.com/lstein/GFPGAN@fix-dark-cast-images#egg=gfpgan
- -e .
- flask==2.1.3
- flask_socketio==5.3.0
- flask_cors==3.0.10
- dependency_injector==4.40.0
- eventlet==0.33.1
- opencv-python==4.6.0
- protobuf==3.19.6
- realesrgan==0.2.5.0
- send2trash==1.8.0
- test-tube==0.7.5
- transformers==4.21.3
- torch-fidelity==0.3.0
- -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
- -e git+https://github.com/openai/CLIP.git@main#egg=clip
- -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
- -e git+https://github.com/TencentARC/GFPGAN.git#egg=gfpgan
- -e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
- -e .
variables:
PYTORCH_ENABLE_MPS_FALLBACK: 1

View File

@ -1,4 +1,4 @@
name: ldm
name: invokeai
channels:
- pytorch
- defaults
@ -15,7 +15,7 @@ dependencies:
- pudb==2019.2
- imageio==2.9.0
- imageio-ffmpeg==0.4.2
- pytorch-lightning==1.4.2
- pytorch-lightning==1.7.7
- omegaconf==2.1.1
- realesrgan==0.2.5.0
- test-tube>=0.7.5
@ -25,8 +25,8 @@ dependencies:
- einops==0.3.0
- pyreadline3
- torch-fidelity==0.3.0
- transformers==4.19.2
- torchmetrics==0.6.0
- transformers==4.21.3
- torchmetrics==0.7.0
- flask==2.1.3
- flask_socketio==5.3.0
- flask_cors==3.0.10
@ -36,5 +36,6 @@ dependencies:
- -e git+https://github.com/openai/CLIP.git@main#egg=clip
- -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
- -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
- -e git+https://github.com/lstein/GFPGAN@fix-dark-cast-images#egg=gfpgan
- -e git+https://github.com/TencentARC/GFPGAN.git#egg=gfpgan
- -e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
- -e .

frontend/.gitignore vendored (2 lines changed)
@ -23,5 +23,3 @@ dist-ssr
*.njsproj
*.sln
*.sw?
dist/*

Several binary assets changed (previews not shown), including the new file frontend/dist/assets/Inter.b9a8e5e2.ttf and a new image (116 KiB). One asset's text diff was suppressed because its lines are too long.

frontend/dist/assets/index.b06af007.js vendored Normal file (690 lines; diff suppressed because its lines are too long)
frontend/dist/assets/logo.13003d72.png vendored Normal file (binary image, 43 KiB; preview not shown)

frontend/dist/index.html vendored Normal file (18 lines)
@ -0,0 +1,18 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="/assets/favicon.0d253ced.ico" />
<script type="module" crossorigin src="/assets/index.b06af007.js"></script>
<link rel="stylesheet" href="/assets/index.58175ea1.css">
</head>
<body>
<div id="root"></div>
</body>
</html>

View File

@ -23,7 +23,9 @@
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-dropzone": "^14.2.2",
"react-hotkeys-hook": "^3.4.7",
"react-icons": "^4.4.0",
"react-masonry-css": "^1.0.16",
"react-redux": "^8.0.2",
"redux-persist": "^6.0.0",
"socket.io": "^4.5.2",

View File

@ -2,6 +2,7 @@
.App {
display: grid;
width: max-content;
}
.app-content {
@ -14,4 +15,9 @@
grid-auto-rows: max-content;
width: $app-width;
height: $app-height;
min-width: min-content;
}
.app-console {
z-index: 9999;
}

View File

@ -26,7 +26,9 @@ const App = () => {
<SiteHeader />
<InvokeTabs />
</div>
<Console />
<div className="app-console">
<Console />
</div>
</div>
) : (
<Loading />

View File

@ -32,26 +32,8 @@ export const UPSCALING_LEVELS: Array<{ key: string; value: number }> = [
{ key: '4x', value: 4 },
];
// Internal to human-readable parameters
export const PARAMETERS: { [key: string]: string } = {
prompt: 'Prompt',
iterations: 'Iterations',
steps: 'Steps',
cfgScale: 'CFG Scale',
height: 'Height',
width: 'Width',
sampler: 'Sampler',
seed: 'Seed',
img2imgStrength: 'img2img Strength',
gfpganStrength: 'GFPGAN Strength',
upscalingLevel: 'Upscaling Level',
upscalingStrength: 'Upscaling Strength',
initialImagePath: 'Initial Image',
maskPath: 'Initial Image Mask',
shouldFitToWidthHeight: 'Fit Initial Image',
seamless: 'Seamless Tiling',
};
export const NUMPY_RAND_MIN = 0;
export const NUMPY_RAND_MAX = 4294967295;
export const FACETOOL_TYPES = ['gfpgan', 'codeformer'] as const;

View File

@ -14,10 +14,13 @@ export enum Feature {
FACE_CORRECTION,
IMAGE_TO_IMAGE,
}
/** For each tooltip in the UI, the below feature definitions & props will pull relevant information into the tooltip.
*
* To-do: href & GuideImages are placeholders and are not currently utilized; they will be updated (along with the tooltip UI) as feature and UI development continues and we get a better idea of where things' "forever homes" will be.
*/
export const FEATURES: Record<Feature, FeatureHelpInfo> = {
[Feature.PROMPT]: {
text: 'This field will take all prompt text, including both content and stylistic terms. CLI Commands will not work in the prompt.',
text: 'This field will take all prompt text, including both content and stylistic terms. While weights can be included in the prompt, standard CLI Commands/parameters will not work.',
href: 'link/to/docs/feature3.html',
guideImage: 'asset/path.gif',
},
@ -27,17 +30,17 @@ export const FEATURES: Record<Feature, FeatureHelpInfo> = {
guideImage: 'asset/path.gif',
},
[Feature.OTHER]: {
text: 'Additional Options',
text: 'These options will enable alternative processing modes for Invoke. Seamless tiling will work to generate repeating patterns in the output. High Resolution Optimization performs a two-step generation cycle, and should be used at higher resolutions when you desire a more coherent image/composition. ',
href: 'link/to/docs/feature3.html',
guideImage: 'asset/path.gif',
},
[Feature.SEED]: {
text: 'Seed values provide an initial set of noise which guide the denoising process.',
text: 'Seed values provide an initial set of noise which guides the denoising process, and can be randomized or populated with a seed from a previous invocation. The Threshold feature can be used to mitigate undesirable outcomes at higher CFG values (try between 0-10), and Perlin can be used to add Perlin noise into the denoising process. Both serve to add variation to your outputs.',
href: 'link/to/docs/feature3.html',
guideImage: 'asset/path.gif',
},
[Feature.VARIATIONS]: {
text: 'Try a variation with an amount of between 0 and 1 to change the output image for the set seed.',
text: 'Try a variation with an amount between 0 and 1 to change the output image for the set seed. Interesting variations on the seed are found between 0.1 and 0.3.',
href: 'link/to/docs/feature3.html',
guideImage: 'asset/path.gif',
},
@ -47,8 +50,8 @@ export const FEATURES: Record<Feature, FeatureHelpInfo> = {
guideImage: 'asset/path.gif',
},
[Feature.FACE_CORRECTION]: {
text: 'Using GFPGAN or CodeFormer, Face Correction will attempt to identify faces in outputs, and correct any defects/abnormalities. Higher values will apply a stronger corrective pressure on outputs.',
href: 'link/to/docs/feature2.html',
text: 'Using GFPGAN or CodeFormer, Face Correction will attempt to identify faces in outputs, and correct any defects/abnormalities. Higher strength values will apply a stronger corrective pressure on outputs, resulting in more appealing faces. With CodeFormer, a higher fidelity will attempt to preserve the original image, at the expense of face correction strength.',
href: 'link/to/docs/feature3.html',
guideImage: 'asset/path.gif',
},
[Feature.IMAGE_TO_IMAGE]: {

View File

@ -55,6 +55,7 @@ export declare type CommonGeneratedImageMetadata = {
width: number;
height: number;
seamless: boolean;
hires_fix: boolean;
extra: null | Record<string, never>; // Pending development of RFC #266
};
@ -88,15 +89,16 @@ export declare type ESRGANMetadata = CommonPostProcessedImageMetadata & {
strength: number;
};
export declare type GFPGANMetadata = CommonPostProcessedImageMetadata & {
type: 'gfpgan';
export declare type FacetoolMetadata = CommonPostProcessedImageMetadata & {
type: 'gfpgan' | 'codeformer';
strength: number;
fidelity?: number;
};
// Superset of all postprocessed image metadata types.
export declare type PostProcessedImageMetadata =
| ESRGANMetadata
| GFPGANMetadata;
| FacetoolMetadata;
// Metadata includes the system config and image metadata.
export declare type Metadata = SystemConfig & {

View File

@ -10,7 +10,7 @@ import * as InvokeAI from '../invokeai';
export const generateImage = createAction<undefined>('socketio/generateImage');
export const runESRGAN = createAction<InvokeAI.Image>('socketio/runESRGAN');
export const runGFPGAN = createAction<InvokeAI.Image>('socketio/runGFPGAN');
export const runFacetool = createAction<InvokeAI.Image>('socketio/runFacetool');
export const deleteImage = createAction<InvokeAI.Image>('socketio/deleteImage');
export const requestImages = createAction<undefined>(
'socketio/requestImages'

View File

@ -6,6 +6,7 @@ import {
addLogEntry,
setIsProcessing,
} from '../../features/system/systemSlice';
import { tabMap, tab_dict } from '../../features/tabs/InvokeTabs';
import * as InvokeAI from '../invokeai';
/**
@ -23,14 +24,20 @@ const makeSocketIOEmitters = (
emitGenerateImage: () => {
dispatch(setIsProcessing(true));
const { generationParameters, esrganParameters, gfpganParameters } =
frontendToBackendParameters(getState().options, getState().system);
const options = { ...getState().options };
if (tabMap[options.activeTab] === 'txt2img') {
options.shouldUseInitImage = false;
}
const { generationParameters, esrganParameters, facetoolParameters } =
frontendToBackendParameters(options, getState().system);
socketio.emit(
'generateImage',
generationParameters,
esrganParameters,
gfpganParameters
facetoolParameters
);
dispatch(
@ -39,7 +46,7 @@ const makeSocketIOEmitters = (
message: `Image generation requested: ${JSON.stringify({
...generationParameters,
...esrganParameters,
...gfpganParameters,
...facetoolParameters,
})}`,
})
);
@ -64,24 +71,32 @@ const makeSocketIOEmitters = (
})
);
},
emitRunGFPGAN: (imageToProcess: InvokeAI.Image) => {
emitRunFacetool: (imageToProcess: InvokeAI.Image) => {
dispatch(setIsProcessing(true));
const { gfpganStrength } = getState().options;
const { facetoolType, facetoolStrength, codeformerFidelity } =
getState().options;
const gfpganParameters = {
gfpgan_strength: gfpganStrength,
const facetoolParameters: Record<string, any> = {
facetool_strength: facetoolStrength,
};
if (facetoolType === 'codeformer') {
facetoolParameters.codeformer_fidelity = codeformerFidelity;
}
socketio.emit('runPostprocessing', imageToProcess, {
type: 'gfpgan',
...gfpganParameters,
type: facetoolType,
...facetoolParameters,
});
dispatch(
addLogEntry({
timestamp: dateFormat(new Date(), 'isoDateTime'),
message: `GFPGAN fix faces requested: ${JSON.stringify({
file: imageToProcess.url,
...gfpganParameters,
})}`,
message: `Face restoration (${facetoolType}) requested: ${JSON.stringify(
{
file: imageToProcess.url,
...facetoolParameters,
}
)}`,
})
);
},

View File

@ -151,32 +151,6 @@ const makeSocketIOListeners = (
console.error(e);
}
},
/**
* Callback to run when we receive a 'gfpganResult' event.
*/
onGFPGANResult: (data: InvokeAI.ImageResultResponse) => {
try {
const { url, metadata, mtime } = data;
dispatch(
addImage({
uuid: uuidv4(),
url,
mtime,
metadata,
})
);
dispatch(
addLogEntry({
timestamp: dateFormat(new Date(), 'isoDateTime'),
message: `Fixed faces: ${url}`,
})
);
} catch (e) {
console.error(e);
}
},
/**
* Callback to run when we receive a 'progressUpdate' event.
* TODO: Add additional progress phases

View File

@ -22,9 +22,9 @@ import * as InvokeAI from '../invokeai';
* some new action to handle whatever data was sent from the server.
*/
export const socketioMiddleware = () => {
const { hostname, port } = new URL(window.location.href);
const { origin } = new URL(window.location.href);
const socketio = io(`http://${hostname}:${port}`, {
const socketio = io(origin, {
timeout: 60000,
});
@ -50,7 +50,7 @@ export const socketioMiddleware = () => {
const {
emitGenerateImage,
emitRunESRGAN,
emitRunGFPGAN,
emitRunFacetool,
emitDeleteImage,
emitRequestImages,
emitRequestNewImages,
@ -129,8 +129,8 @@ export const socketioMiddleware = () => {
break;
}
case 'socketio/runGFPGAN': {
emitRunGFPGAN(action.payload);
case 'socketio/runFacetool': {
emitRunFacetool(action.payload);
break;
}

View File

@ -1,7 +1,13 @@
import { IconButtonProps, IconButton, Tooltip } from '@chakra-ui/react';
import {
IconButtonProps,
IconButton,
Tooltip,
PlacementWithLogical,
} from '@chakra-ui/react';
interface Props extends IconButtonProps {
tooltip?: string;
tooltipPlacement?: PlacementWithLogical | undefined;
}
/**
@ -10,10 +16,14 @@ interface Props extends IconButtonProps {
* TODO: Get rid of this.
*/
const IAIIconButton = (props: Props) => {
const { tooltip = '', onClick, ...rest } = props;
const { tooltip = '', tooltipPlacement = 'bottom', onClick, ...rest } = props;
return (
<Tooltip label={tooltip}>
<IconButton {...rest} cursor={onClick ? 'pointer' : 'unset'} onClick={onClick}/>
<Tooltip label={tooltip} hasArrow placement={tooltipPlacement}>
<IconButton
{...rest}
cursor={onClick ? 'pointer' : 'unset'}
onClick={onClick}
/>
</Tooltip>
);
};

View File

@ -0,0 +1,65 @@
import { Button, useToast } from '@chakra-ui/react';
import React, { useCallback } from 'react';
import { FileRejection } from 'react-dropzone';
import { useAppDispatch } from '../../app/store';
import ImageUploader from '../../features/options/ImageUploader';
interface InvokeImageUploaderProps {
label?: string;
icon?: any;
onMouseOver?: any;
OnMouseout?: any;
dispatcher: any;
styleClass?: string;
}
export default function InvokeImageUploader(props: InvokeImageUploaderProps) {
const { label, icon, dispatcher, styleClass, onMouseOver, OnMouseout } =
props;
const toast = useToast();
const dispatch = useAppDispatch();
// Callbacks for handling file upload attempts
const fileAcceptedCallback = useCallback(
(file: File) => dispatch(dispatcher(file)),
[dispatch, dispatcher]
);
const fileRejectionCallback = useCallback(
(rejection: FileRejection) => {
const msg = rejection.errors.reduce(
(acc: string, cur: { message: string }) => acc + '\n' + cur.message,
''
);
toast({
title: 'Upload failed',
description: msg,
status: 'error',
isClosable: true,
});
},
[toast]
);
return (
<ImageUploader
fileAcceptedCallback={fileAcceptedCallback}
fileRejectionCallback={fileRejectionCallback}
styleClass={styleClass}
>
<Button
size={'sm'}
fontSize={'md'}
fontWeight={'normal'}
onMouseOver={onMouseOver}
onMouseOut={OnMouseout}
leftIcon={icon}
width={'100%'}
>
{label ? label : null}
</Button>
</ImageUploader>
);
}

View File

@ -18,6 +18,7 @@ export const optionsSelector = createSelector(
maskPath: options.maskPath,
initialImagePath: options.initialImagePath,
seed: options.seed,
activeTab: options.activeTab,
};
},
{
@ -55,6 +56,7 @@ const useCheckParameters = (): boolean => {
maskPath,
initialImagePath,
seed,
activeTab,
} = useAppSelector(optionsSelector);
const { isProcessing, isConnected } = useAppSelector(systemSelector);
@ -65,6 +67,10 @@ const useCheckParameters = (): boolean => {
return false;
}
if (prompt && !initialImagePath && activeTab === 1) {
return false;
}
// Cannot generate with a mask without img2img
if (maskPath && !initialImagePath) {
return false;
@ -100,6 +106,7 @@ const useCheckParameters = (): boolean => {
shouldGenerateVariations,
seedWeights,
seed,
activeTab,
]);
};

View File

@ -29,6 +29,7 @@ export const frontendToBackendParameters = (
sampler,
seed,
seamless,
hiresFix,
shouldUseInitImage,
img2imgStrength,
initialImagePath,
@ -40,8 +41,10 @@ export const frontendToBackendParameters = (
shouldRunESRGAN,
upscalingLevel,
upscalingStrength,
shouldRunGFPGAN,
gfpganStrength,
shouldRunFacetool,
facetoolStrength,
codeformerFidelity,
facetoolType,
shouldRandomizeSeed,
} = optionsState;
@ -59,6 +62,7 @@ export const frontendToBackendParameters = (
sampler_name: sampler,
seed,
seamless,
hires_fix: hiresFix,
progress_images: shouldDisplayInProgress,
};
@ -86,7 +90,7 @@ export const frontendToBackendParameters = (
}
let esrganParameters: false | { [k: string]: any } = false;
let gfpganParameters: false | { [k: string]: any } = false;
let facetoolParameters: false | { [k: string]: any } = false;
if (shouldRunESRGAN) {
esrganParameters = {
@ -95,97 +99,19 @@ export const frontendToBackendParameters = (
};
}
if (shouldRunGFPGAN) {
gfpganParameters = {
strength: gfpganStrength,
if (shouldRunFacetool) {
facetoolParameters = {
type: facetoolType,
strength: facetoolStrength,
};
if (facetoolType === 'codeformer') {
facetoolParameters.codeformer_fidelity = codeformerFidelity
}
}
return {
generationParameters,
esrganParameters,
gfpganParameters,
facetoolParameters,
};
};
export const backendToFrontendParameters = (parameters: {
[key: string]: any;
}) => {
const {
prompt,
iterations,
steps,
cfg_scale,
threshold,
perlin,
height,
width,
sampler_name,
seed,
seamless,
progress_images,
variation_amount,
with_variations,
gfpgan_strength,
upscale,
init_img,
init_mask,
strength,
} = parameters;
const options: { [key: string]: any } = {
shouldDisplayInProgress: progress_images,
// init
shouldGenerateVariations: false,
shouldRunESRGAN: false,
shouldRunGFPGAN: false,
initialImagePath: '',
maskPath: '',
};
if (variation_amount > 0) {
options.shouldGenerateVariations = true;
options.variationAmount = variation_amount;
if (with_variations) {
options.seedWeights = seedWeightsToString(with_variations);
}
}
if (gfpgan_strength > 0) {
options.shouldRunGFPGAN = true;
options.gfpganStrength = gfpgan_strength;
}
if (upscale) {
options.shouldRunESRGAN = true;
options.upscalingLevel = upscale[0];
options.upscalingStrength = upscale[1];
}
if (init_img) {
options.shouldUseInitImage = true;
options.initialImagePath = init_img;
options.strength = strength;
if (init_mask) {
options.maskPath = init_mask;
}
}
// if we had a prompt, add all the metadata, but if we don't have a prompt,
// we must have only done ESRGAN or GFPGAN so do not add that metadata
if (prompt) {
options.prompt = prompt;
options.iterations = iterations;
options.steps = steps;
options.cfgScale = cfg_scale;
options.threshold = threshold;
options.perlin = perlin;
options.height = height;
options.width = width;
options.sampler = sampler_name;
options.seed = seed;
options.seamless = seamless;
}
return options;
};

View File

@ -6,19 +6,23 @@ import * as InvokeAI from '../../app/invokeai';
import { useAppDispatch, useAppSelector } from '../../app/store';
import { RootState } from '../../app/store';
import {
setActiveTab,
setAllParameters,
setInitialImagePath,
setSeed,
setShouldShowImageDetails,
} from '../options/optionsSlice';
import DeleteImageModal from './DeleteImageModal';
import { SystemState } from '../system/systemSlice';
import IAIButton from '../../common/components/IAIButton';
import { runESRGAN, runGFPGAN } from '../../app/socketio/actions';
import { runESRGAN, runFacetool } from '../../app/socketio/actions';
import IAIIconButton from '../../common/components/IAIIconButton';
import { MdDelete, MdFace, MdHd, MdImage, MdInfo } from 'react-icons/md';
import InvokePopover from './InvokePopover';
import UpscaleOptions from '../options/AdvancedOptions/Upscale/UpscaleOptions';
import FaceRestoreOptions from '../options/AdvancedOptions/FaceRestore/FaceRestoreOptions';
import { useHotkeys } from 'react-hotkeys-hook';
import { useToast } from '@chakra-ui/react';
const systemSelector = createSelector(
(state: RootState) => state.system,
@ -39,21 +43,21 @@ const systemSelector = createSelector(
type CurrentImageButtonsProps = {
image: InvokeAI.Image;
shouldShowImageDetails: boolean;
setShouldShowImageDetails: (b: boolean) => void;
};
/**
* Row of buttons for common actions:
* Use as init image, use all params, use seed, upscale, fix faces, details, delete.
*/
const CurrentImageButtons = ({
image,
shouldShowImageDetails,
setShouldShowImageDetails,
}: CurrentImageButtonsProps) => {
const CurrentImageButtons = ({ image }: CurrentImageButtonsProps) => {
const dispatch = useAppDispatch();
const shouldShowImageDetails = useAppSelector(
(state: RootState) => state.options.shouldShowImageDetails
);
const toast = useToast();
const intermediateImage = useAppSelector(
(state: RootState) => state.gallery.intermediateImage
);
@ -62,35 +66,184 @@ const CurrentImageButtons = ({
(state: RootState) => state.options.upscalingLevel
);
const gfpganStrength = useAppSelector(
(state: RootState) => state.options.gfpganStrength
const facetoolStrength = useAppSelector(
(state: RootState) => state.options.facetoolStrength
);
const { isProcessing, isConnected, isGFPGANAvailable, isESRGANAvailable } =
useAppSelector(systemSelector);
const handleClickUseAsInitialImage = () =>
const handleClickUseAsInitialImage = () => {
dispatch(setInitialImagePath(image.url));
dispatch(setActiveTab(1));
};
useHotkeys(
'shift+i',
() => {
if (image) {
handleClickUseAsInitialImage();
toast({
title: 'Sent To Image To Image',
status: 'success',
duration: 2500,
isClosable: true,
});
} else {
toast({
title: 'No Image Loaded',
description: 'No image found to send to image to image module.',
status: 'error',
duration: 2500,
isClosable: true,
});
}
},
[image]
);
const handleClickUseAllParameters = () =>
dispatch(setAllParameters(image.metadata));
useHotkeys(
'a',
() => {
if (['txt2img', 'img2img'].includes(image?.metadata?.image?.type)) {
handleClickUseAllParameters();
toast({
title: 'Parameters Set',
status: 'success',
duration: 2500,
isClosable: true,
});
} else {
toast({
title: 'Parameters Not Set',
description: 'No metadata found for this image.',
status: 'error',
duration: 2500,
isClosable: true,
});
}
},
[image]
);
// Non-null assertion: this button is disabled if there is no seed.
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
const handleClickUseSeed = () => dispatch(setSeed(image.metadata.image.seed));
const handleClickUpscale = () => dispatch(runESRGAN(image));
useHotkeys(
's',
() => {
if (image?.metadata?.image?.seed) {
handleClickUseSeed();
toast({
title: 'Seed Set',
status: 'success',
duration: 2500,
isClosable: true,
});
} else {
toast({
title: 'Seed Not Set',
description: 'Could not find seed for this image.',
status: 'error',
duration: 2500,
isClosable: true,
});
}
},
[image]
);
const handleClickFixFaces = () => dispatch(runGFPGAN(image));
const handleClickUpscale = () => dispatch(runESRGAN(image));
useHotkeys(
'u',
() => {
if (
isESRGANAvailable &&
Boolean(!intermediateImage) &&
isConnected &&
!isProcessing &&
upscalingLevel
) {
handleClickUpscale();
} else {
toast({
title: 'Upscaling Failed',
status: 'error',
duration: 2500,
isClosable: true,
});
}
},
[
image,
isESRGANAvailable,
intermediateImage,
isConnected,
isProcessing,
upscalingLevel,
]
);
const handleClickFixFaces = () => dispatch(runFacetool(image));
useHotkeys(
'r',
() => {
if (
isGFPGANAvailable &&
Boolean(!intermediateImage) &&
isConnected &&
!isProcessing &&
facetoolStrength
) {
handleClickFixFaces();
} else {
toast({
title: 'Face Restoration Failed',
status: 'error',
duration: 2500,
isClosable: true,
});
}
},
[
image,
isGFPGANAvailable,
intermediateImage,
isConnected,
isProcessing,
facetoolStrength,
]
);
const handleClickShowImageDetails = () =>
setShouldShowImageDetails(!shouldShowImageDetails);
dispatch(setShouldShowImageDetails(!shouldShowImageDetails));
useHotkeys(
'i',
() => {
if (image) {
handleClickShowImageDetails();
} else {
toast({
title: 'Failed to load metadata',
status: 'error',
duration: 2500,
isClosable: true,
});
}
},
[image, shouldShowImageDetails]
);
return (
<div className="current-image-options">
<IAIIconButton
icon={<MdImage />}
tooltip="Use As Initial Image"
aria-label="Use As Initial Image"
tooltip="Send To Image To Image"
aria-label="Send To Image To Image"
onClick={handleClickUseAsInitialImage}
/>
@ -118,7 +271,7 @@ const CurrentImageButtons = ({
!isGFPGANAvailable ||
Boolean(intermediateImage) ||
!(isConnected && !isProcessing) ||
!gfpganStrength
!facetoolStrength
}
onClick={handleClickFixFaces}
/>

View File

@ -11,23 +11,7 @@
border-radius: 0.5rem;
}
.current-image-display-placeholder {
background-color: var(--background-color-secondary);
display: flex;
align-items: center;
justify-content: center;
width: 100%;
height: 100%;
svg {
width: 10rem;
height: 10rem;
color: var(--svg-color);
}
}
.current-image-tools {
grid-area: current-image-tools;
width: 100%;
height: 100%;
display: grid;
@ -58,34 +42,69 @@
align-items: center;
display: grid;
width: 100%;
grid-template-areas: 'current-image-content';
img {
grid-area: current-image-content;
background-color: var(--img2img-img-bg-color);
border-radius: 0.5rem;
object-fit: contain;
width: auto;
height: $app-gallery-height;
max-height: $app-gallery-height;
}
}
.current-image-metadata-viewer {
border-radius: 0.5rem;
position: absolute;
top: 0;
left: 0;
width: calc(100% - 2rem);
padding: 0.5rem;
margin-left: 1rem;
background-color: var(--metadata-bg-color);
z-index: 1;
overflow: scroll;
height: calc($app-metadata-height - 1rem);
.current-image-metadata {
grid-area: current-image-preview;
}
.current-image-json-viewer {
border-radius: 0.5rem;
margin: 0 0.5rem 1rem 0.5rem;
padding: 1rem;
overflow-x: scroll;
word-break: break-all;
background-color: var(--metadata-json-bg-color);
.current-image-next-prev-buttons {
grid-area: current-image-content;
display: flex;
justify-content: space-between;
z-index: 1;
height: 100%;
pointer-events: none;
}
.next-prev-button-trigger-area {
width: 7rem;
height: 100%;
width: 15%;
display: grid;
align-items: center;
pointer-events: auto;
&.prev-button-trigger-area {
justify-content: flex-start;
}
&.next-button-trigger-area {
justify-content: flex-end;
}
}
.next-prev-button {
font-size: 4rem;
fill: var(--white);
filter: drop-shadow(0 0 1rem var(--text-color-secondary));
opacity: 70%;
}
.current-image-display-placeholder {
background-color: var(--background-color-secondary);
display: grid;
display: flex;
align-items: center;
justify-content: center;
width: 100%;
height: 100%;
border-radius: 0.5rem;
svg {
width: 10rem;
height: 10rem;
color: var(--svg-color);
}
}

View File

@ -1,10 +1,8 @@
import { Image } from '@chakra-ui/react';
import { useAppSelector } from '../../app/store';
import { RootState } from '../../app/store';
import { useState } from 'react';
import ImageMetadataViewer from './ImageMetadataViewer';
import { RootState, useAppSelector } from '../../app/store';
import CurrentImageButtons from './CurrentImageButtons';
import { MdPhoto } from 'react-icons/md';
import CurrentImagePreview from './CurrentImagePreview';
import ImageMetadataViewer from './ImageMetaDataViewer/ImageMetadataViewer';
/**
* Displays the current image if there is one, plus associated actions.
@ -14,33 +12,24 @@ const CurrentImageDisplay = () => {
(state: RootState) => state.gallery
);
const [shouldShowImageDetails, setShouldShowImageDetails] =
useState<boolean>(false);
const shouldShowImageDetails = useAppSelector(
(state: RootState) => state.options.shouldShowImageDetails
);
const imageToDisplay = intermediateImage || currentImage;
return imageToDisplay ? (
<div className="current-image-display">
<div className="current-image-tools">
<CurrentImageButtons
<CurrentImageButtons image={imageToDisplay} />
</div>
<CurrentImagePreview imageToDisplay={imageToDisplay} />
{shouldShowImageDetails && (
<ImageMetadataViewer
image={imageToDisplay}
shouldShowImageDetails={shouldShowImageDetails}
setShouldShowImageDetails={setShouldShowImageDetails}
styleClass="current-image-metadata"
/>
</div>
<div className="current-image-preview">
<Image
src={imageToDisplay.url}
fit="contain"
maxWidth={'100%'}
maxHeight={'100%'}
/>
{shouldShowImageDetails && (
<div className="current-image-metadata-viewer">
<ImageMetadataViewer image={imageToDisplay} />
</div>
)}
</div>
)}
</div>
) : (
<div className="current-image-display-placeholder">

View File

@ -0,0 +1,105 @@
import { IconButton, Image } from '@chakra-ui/react';
import React, { useState } from 'react';
import { FaAngleLeft, FaAngleRight } from 'react-icons/fa';
import { RootState, useAppDispatch, useAppSelector } from '../../app/store';
import { GalleryState, selectNextImage, selectPrevImage } from './gallerySlice';
import * as InvokeAI from '../../app/invokeai';
import { createSelector } from '@reduxjs/toolkit';
import _ from 'lodash';
const imagesSelector = createSelector(
(state: RootState) => state.gallery,
(gallery: GalleryState) => {
const currentImageIndex = gallery.images.findIndex(
(i) => i.uuid === gallery?.currentImage?.uuid
);
const imagesLength = gallery.images.length;
return {
isOnFirstImage: currentImageIndex === 0,
isOnLastImage:
!isNaN(currentImageIndex) && currentImageIndex === imagesLength - 1,
};
},
{
memoizeOptions: {
resultEqualityCheck: _.isEqual,
},
}
);
interface CurrentImagePreviewProps {
imageToDisplay: InvokeAI.Image;
}
export default function CurrentImagePreview(props: CurrentImagePreviewProps) {
const { imageToDisplay } = props;
const dispatch = useAppDispatch();
const { isOnFirstImage, isOnLastImage } = useAppSelector(imagesSelector);
const shouldShowImageDetails = useAppSelector(
(state: RootState) => state.options.shouldShowImageDetails
);
const [shouldShowNextPrevButtons, setShouldShowNextPrevButtons] =
useState<boolean>(false);
const handleCurrentImagePreviewMouseOver = () => {
setShouldShowNextPrevButtons(true);
};
const handleCurrentImagePreviewMouseOut = () => {
setShouldShowNextPrevButtons(false);
};
const handleClickPrevButton = () => {
dispatch(selectPrevImage());
};
const handleClickNextButton = () => {
dispatch(selectNextImage());
};
return (
<div className="current-image-preview">
<Image
src={imageToDisplay.url}
fit="contain"
maxWidth={'100%'}
maxHeight={'100%'}
/>
{!shouldShowImageDetails && (
<div className="current-image-next-prev-buttons">
<div
className="next-prev-button-trigger-area prev-button-trigger-area"
onMouseOver={handleCurrentImagePreviewMouseOver}
onMouseOut={handleCurrentImagePreviewMouseOut}
>
{shouldShowNextPrevButtons && !isOnFirstImage && (
<IconButton
aria-label="Previous image"
icon={<FaAngleLeft className="next-prev-button" />}
variant="unstyled"
onClick={handleClickPrevButton}
/>
)}
</div>
<div
className="next-prev-button-trigger-area next-button-trigger-area"
onMouseOver={handleCurrentImagePreviewMouseOver}
onMouseOut={handleCurrentImagePreviewMouseOut}
>
{shouldShowNextPrevButtons && !isOnLastImage && (
<IconButton
aria-label="Next image"
icon={<FaAngleRight className="next-prev-button" />}
variant="unstyled"
onClick={handleClickNextButton}
/>
)}
</div>
</div>
)}
</div>
);
}

View File

@ -27,6 +27,7 @@ import { deleteImage } from '../../app/socketio/actions';
import { RootState } from '../../app/store';
import { setShouldConfirmOnDelete, SystemState } from '../system/systemSlice';
import * as InvokeAI from '../../app/invokeai';
import { useHotkeys } from 'react-hotkeys-hook';
interface DeleteImageModalProps {
/**
@ -67,6 +68,14 @@ const DeleteImageModal = forwardRef(
onClose();
};
useHotkeys(
'del',
() => {
shouldConfirmOnDelete ? onOpen() : handleDelete();
},
[image, shouldConfirmOnDelete]
);
const handleChangeShouldConfirmOnDelete = (
e: ChangeEvent<HTMLInputElement>
) => dispatch(setShouldConfirmOnDelete(!e.target.checked));

View File

@ -0,0 +1,59 @@
.hoverable-image {
display: flex;
justify-content: center;
transition: transform 0.2s ease-out;
&:hover {
cursor: pointer;
border-radius: 0.5rem;
z-index: 2;
}
.hoverable-image-image {
width: 100%;
height: 100%;
object-fit: cover;
max-width: 100%;
max-height: 100%;
}
.hoverable-image-content {
display: flex;
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
align-items: center;
justify-content: center;
.hoverable-image-check {
fill: var(--status-good-color);
}
}
.hoverable-image-icons {
position: absolute;
bottom: -2rem;
display: grid;
width: min-content;
grid-template-columns: repeat(2, max-content);
border-radius: 0.4rem;
background-color: var(--background-color-secondary);
padding: 0.2rem;
gap: 0.2rem;
grid-auto-rows: max-content;
button {
width: 12px;
height: 12px;
border-radius: 0.2rem;
padding: 10px 0;
flex-shrink: 2;
svg {
width: 12px;
height: 12px;
}
}
}
}

View File

@ -1,18 +1,15 @@
import {
Box,
Flex,
Icon,
IconButton,
Image,
Tooltip,
useColorModeValue,
} from '@chakra-ui/react';
import { useAppDispatch } from '../../app/store';
import { Box, Icon, IconButton, Image, Tooltip } from '@chakra-ui/react';
import { RootState, useAppDispatch, useAppSelector } from '../../app/store';
import { setCurrentImage } from './gallerySlice';
import { FaCheck, FaSeedling, FaTrashAlt } from 'react-icons/fa';
import { FaCheck, FaImage, FaSeedling, FaTrashAlt } from 'react-icons/fa';
import DeleteImageModal from './DeleteImageModal';
import { memo, SyntheticEvent, useState } from 'react';
import { setAllParameters, setSeed } from '../options/optionsSlice';
import {
setActiveTab,
setAllParameters,
setInitialImagePath,
setSeed,
} from '../options/optionsSlice';
import * as InvokeAI from '../../app/invokeai';
import { IoArrowUndoCircleOutline } from 'react-icons/io5';
@ -33,11 +30,8 @@ const HoverableImage = memo((props: HoverableImageProps) => {
const [isHovered, setIsHovered] = useState<boolean>(false);
const dispatch = useAppDispatch();
const checkColor = useColorModeValue('green.600', 'green.300');
const bgColor = useColorModeValue('gray.200', 'gray.700');
const bgGradient = useColorModeValue(
'radial-gradient(circle, rgba(255,255,255,0.7) 0%, rgba(255,255,255,0.7) 20%, rgba(0,0,0,0) 100%)',
'radial-gradient(circle, rgba(0,0,0,0.7) 0%, rgba(0,0,0,0.7) 20%, rgba(0,0,0,0) 100%)'
const activeTab = useAppSelector(
(state: RootState) => state.options.activeTab
);
const { image, isSelected } = props;
@ -56,84 +50,91 @@ const HoverableImage = memo((props: HoverableImageProps) => {
dispatch(setSeed(image.metadata.image.seed));
};
const handleSetInitImage = (e: SyntheticEvent) => {
e.stopPropagation();
dispatch(setInitialImagePath(image.url));
if (activeTab !== 1) {
dispatch(setActiveTab(1));
}
};
const handleClickImage = () => dispatch(setCurrentImage(image));
return (
<Box position={'relative'} key={uuid}>
<Box
position={'relative'}
key={uuid}
className="hoverable-image"
onMouseOver={handleMouseOver}
onMouseOut={handleMouseOut}
>
<Image
width={120}
height={120}
objectFit="cover"
rounded={'md'}
src={url}
loading={'lazy'}
backgroundColor={bgColor}
className="hoverable-image-image"
/>
<Flex
cursor={'pointer'}
position={'absolute'}
top={0}
left={0}
rounded={'md'}
width="100%"
height="100%"
alignItems={'center'}
justifyContent={'center'}
background={isSelected ? bgGradient : undefined}
onClick={handleClickImage}
onMouseOver={handleMouseOver}
onMouseOut={handleMouseOut}
>
<div className="hoverable-image-content" onClick={handleClickImage}>
{isSelected && (
<Icon fill={checkColor} width={'50%'} height={'50%'} as={FaCheck} />
<Icon
width={'50%'}
height={'50%'}
as={FaCheck}
className="hoverable-image-check"
/>
)}
{isHovered && (
<Flex
direction={'column'}
gap={1}
position={'absolute'}
top={1}
right={1}
>
<Tooltip label={'Delete image'}>
<DeleteImageModal image={image}>
<IconButton
colorScheme="red"
aria-label="Delete image"
icon={<FaTrashAlt />}
size="xs"
variant={'imageHoverIconButton'}
fontSize={14}
/>
</DeleteImageModal>
</div>
{isHovered && (
<div className="hoverable-image-icons">
<Tooltip label={'Delete image'} hasArrow>
<DeleteImageModal image={image}>
<IconButton
colorScheme="red"
aria-label="Delete image"
icon={<FaTrashAlt />}
size="xs"
variant={'imageHoverIconButton'}
fontSize={14}
/>
</DeleteImageModal>
</Tooltip>
{['txt2img', 'img2img'].includes(image?.metadata?.image?.type) && (
<Tooltip label="Use All Parameters" hasArrow>
<IconButton
aria-label="Use All Parameters"
icon={<IoArrowUndoCircleOutline />}
size="xs"
fontSize={18}
variant={'imageHoverIconButton'}
onClickCapture={handleClickSetAllParameters}
/>
</Tooltip>
{['txt2img', 'img2img'].includes(image?.metadata?.image?.type) && (
<Tooltip label="Use all parameters">
<IconButton
aria-label="Use all parameters"
icon={<IoArrowUndoCircleOutline />}
size="xs"
fontSize={18}
variant={'imageHoverIconButton'}
onClickCapture={handleClickSetAllParameters}
/>
</Tooltip>
)}
{image?.metadata?.image?.seed !== undefined && (
<Tooltip label="Use seed">
<IconButton
aria-label="Use seed"
icon={<FaSeedling />}
size="xs"
fontSize={16}
variant={'imageHoverIconButton'}
onClickCapture={handleClickSetSeed}
/>
</Tooltip>
)}
</Flex>
)}
</Flex>
)}
{image?.metadata?.image?.seed !== undefined && (
<Tooltip label="Use Seed" hasArrow>
<IconButton
aria-label="Use Seed"
icon={<FaSeedling />}
size="xs"
fontSize={16}
variant={'imageHoverIconButton'}
onClickCapture={handleClickSetSeed}
/>
</Tooltip>
)}
<Tooltip label="Send To Image To Image" hasArrow>
<IconButton
aria-label="Send To Image To Image"
icon={<FaImage />}
size="xs"
fontSize={16}
variant={'imageHoverIconButton'}
onClickCapture={handleSetInitImage}
/>
</Tooltip>
</div>
)}
</Box>
);
}, memoEqualityCheck);
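HoverableImage is wrapped in React.memo with a custom equality check (memoEqualityCheck above), so a gallery full of thumbnails only re-renders the ones whose relevant props changed. A stripped-down sketch of that pattern; the Thumb component, its props, and areEqual are illustrative stand-ins, not the file's actual implementation.

import { memo } from 'react';

type ThumbProps = {
  url: string;
  isSelected: boolean;
};

// Hypothetical equality check: skip re-rendering unless a prop that
// actually affects the output has changed.
const areEqual = (prev: ThumbProps, next: ThumbProps) =>
  prev.url === next.url && prev.isSelected === next.isSelected;

const Thumb = memo(
  ({ url, isSelected }: ThumbProps) => (
    <img src={url} alt="" className={isSelected ? 'selected' : undefined} />
  ),
  areEqual
);

export default Thumb;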

View File

@ -1,43 +1,90 @@
@use '../../styles/Mixins/' as *;
.image-gallery-container {
display: grid;
.image-gallery-area {
.image-gallery-popup-btn {
position: absolute;
top: 50%;
right: 1rem;
border-radius: 0.5rem 0 0 0.5rem;
padding: 0 0.5rem;
@include Button(
$btn-width: 1rem,
$btn-height: 6rem,
$icon-size: 20px,
$btn-color: var(--btn-grey),
$btn-color-hover: var(--btn-grey-hover)
);
}
}
.image-gallery-popup {
background-color: var(--tab-color);
padding: 1rem;
animation: slideOut 0.3s ease-out;
display: flex;
flex-direction: column;
row-gap: 1rem;
grid-auto-rows: max-content;
min-width: 16rem;
}
.image-gallery-container-placeholder {
display: grid;
background-color: var(--background-color-secondary);
border-radius: 0.5rem;
place-items: center;
padding: 2rem 0;
border-left-width: 0.2rem;
min-width: 300px;
border-color: var(--gallery-resizeable-color);
}
p {
color: var(--subtext-color-bright);
}
.image-gallery-header {
display: flex;
align-items: center;
svg {
width: 5rem;
height: 5rem;
color: var(--svg-color);
h1 {
font-weight: bold;
}
}
.image-gallery {
display: grid;
grid-template-columns: repeat(2, max-content);
gap: 0.6rem;
justify-items: center;
max-height: $app-gallery-height;
.image-gallery-close-btn {
background-color: var(--btn-load-more) !important;
&:hover {
background-color: var(--btn-load-more-hover) !important;
}
}
.image-gallery-container {
display: flex;
flex-direction: column;
gap: 1rem;
max-height: $app-gallery-popover-height;
overflow-y: scroll;
@include HideScrollbar;
}
.masonry-grid {
display: -webkit-box; /* Not needed if autoprefixing */
display: -ms-flexbox; /* Not needed if autoprefixing */
display: flex;
margin-left: 0.5rem; /* gutter size offset */
width: auto;
}
.masonry-grid_column {
padding-left: 0.5rem; /* gutter size */
background-clip: padding-box;
}
/* Style your items */
.masonry-grid_column > .hoverable-image {
/* change div to reference your elements you put in <Masonry> */
background: var(--tab-color);
margin-bottom: 0.5rem;
}
// .image-gallery {
// display: flex;
// grid-template-columns: repeat(auto-fill, minmax(80px, auto));
// gap: 0.5rem;
// justify-items: center;
// }
.image-gallery-load-more-btn {
background-color: var(--btn-load-more) !important;
font-size: 0.85rem !important;
font-family: Inter;
&:disabled {
&:hover {
@ -49,3 +96,22 @@
background-color: var(--btn-load-more-hover) !important;
}
}
.image-gallery-container-placeholder {
display: flex;
background-color: var(--background-color-secondary);
border-radius: 0.5rem;
place-items: center;
padding: 2rem 0;
p {
color: var(--subtext-color-bright);
font-family: Inter;
}
svg {
width: 5rem;
height: 5rem;
color: var(--svg-color);
}
}

View File

@ -1,66 +1,144 @@
import { Button } from '@chakra-ui/react';
import { MdPhotoLibrary } from 'react-icons/md';
import { requestImages } from '../../app/socketio/actions';
import { RootState, useAppDispatch } from '../../app/store';
import { useAppSelector } from '../../app/store';
import HoverableImage from './HoverableImage';
import { Button, IconButton } from '@chakra-ui/button';
import { Resizable } from 're-resizable';
/**
* Simple image gallery.
*/
const ImageGallery = () => {
import React, { useState } from 'react';
import { useHotkeys } from 'react-hotkeys-hook';
import { MdClear, MdPhotoLibrary } from 'react-icons/md';
import Masonry from 'react-masonry-css';
import { requestImages } from '../../app/socketio/actions';
import { RootState, useAppDispatch, useAppSelector } from '../../app/store';
import IAIIconButton from '../../common/components/IAIIconButton';
import { selectNextImage, selectPrevImage } from './gallerySlice';
import HoverableImage from './HoverableImage';
import { setShouldShowGallery } from '../options/optionsSlice';
export default function ImageGallery() {
const { images, currentImageUuid, areMoreImagesAvailable } = useAppSelector(
(state: RootState) => state.gallery
);
const shouldShowGallery = useAppSelector(
(state: RootState) => state.options.shouldShowGallery
);
const activeTab = useAppSelector(
(state: RootState) => state.options.activeTab
);
const dispatch = useAppDispatch();
/**
* I don't like that this needs to rerender whenever the current image is changed.
* What if we have a large number of images? I suppose pagination (planned) will
* mitigate this issue.
*
* TODO: Refactor if performance complaints, or after migrating to new API which supports pagination.
*/
const [column, setColumn] = useState<number | undefined>();
const handleResize = (event: MouseEvent | TouchEvent | any) => {
setColumn(Math.floor((window.innerWidth - event.x) / 120));
};
const handleShowGalleryToggle = () => {
dispatch(setShouldShowGallery(!shouldShowGallery));
};
const handleGalleryClose = () => {
dispatch(setShouldShowGallery(false));
};
const handleClickLoadMore = () => {
dispatch(requestImages());
};
useHotkeys(
'g',
() => {
handleShowGalleryToggle();
},
[shouldShowGallery]
);
useHotkeys(
'left',
() => {
dispatch(selectPrevImage());
},
[]
);
useHotkeys(
'right',
() => {
dispatch(selectNextImage());
},
[]
);
return (
<div className="image-gallery-container">
{images.length ? (
<>
<p>
<strong>Your Invocations</strong>
</p>
<div className="image-gallery">
{images.map((image) => {
const { uuid } = image;
const isSelected = currentImageUuid === uuid;
return (
<HoverableImage
key={uuid}
image={image}
isSelected={isSelected}
/>
);
})}
</div>
</>
) : (
<div className="image-gallery-container-placeholder">
<div className="image-gallery-area">
{!shouldShowGallery && (
<IAIIconButton
tooltip="Show Gallery"
tooltipPlacement="top"
aria-label="Show Gallery"
onClick={handleShowGalleryToggle}
className="image-gallery-popup-btn"
>
<MdPhotoLibrary />
<p>No Images In Gallery</p>
</div>
</IAIIconButton>
)}
{shouldShowGallery && (
<Resizable
defaultSize={{ width: '300', height: '100%' }}
minWidth={'300'}
maxWidth={activeTab == 1 ? '300' : '600'}
className="image-gallery-popup"
onResize={handleResize}
>
{/* <div className="image-gallery-popup"></div> */}
<div className="image-gallery-header">
<h1>Your Invocations</h1>
<IconButton
size={'sm'}
aria-label={'Close Gallery'}
onClick={handleGalleryClose}
className="image-gallery-close-btn"
icon={<MdClear />}
/>
</div>
<div className="image-gallery-container">
{images.length ? (
<Masonry
className="masonry-grid"
columnClassName="masonry-grid_column"
breakpointCols={column}
>
{/* <div className="image-gallery"> */}
{images.map((image) => {
const { uuid } = image;
const isSelected = currentImageUuid === uuid;
return (
<HoverableImage
key={uuid}
image={image}
isSelected={isSelected}
/>
);
})}
{/* </div> */}
</Masonry>
) : (
<div className="image-gallery-container-placeholder">
<MdPhotoLibrary />
<p>No Images In Gallery</p>
</div>
)}
<Button
onClick={handleClickLoadMore}
isDisabled={!areMoreImagesAvailable}
className="image-gallery-load-more-btn"
>
{areMoreImagesAvailable ? 'Load More' : 'All Images Loaded'}
</Button>
</div>
</Resizable>
)}
<Button
onClick={handleClickLoadMore}
isDisabled={!areMoreImagesAvailable}
className="image-gallery-load-more-btn"
>
{areMoreImagesAvailable ? 'Load More' : 'All Images Loaded'}
</Button>
</div>
);
};
export default ImageGallery;
}
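The resizable gallery recomputes its Masonry column count on every resize event: the panel stretches from the drag handle's x position to the right edge of the window, and each thumbnail column is taken to be roughly 120px wide, matching the divisor in handleResize above. A standalone restatement of that calculation (the helper name and THUMBNAIL_WIDTH constant are assumptions for illustration):

const THUMBNAIL_WIDTH = 120; // assumed per-column width, matching the divisor in handleResize

function columnsForHandlePosition(windowWidth: number, handleX: number): number {
  // width available to the gallery panel, expressed in whole thumbnail columns
  return Math.floor((windowWidth - handleX) / THUMBNAIL_WIDTH);
}

// e.g. a 1920px window with the handle dragged to x = 1500 leaves a 420px
// panel, which fits 3 columns; this value is fed to Masonry's breakpointCols.
columnsForHandlePosition(1920, 1500); // => 3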

View File

@ -0,0 +1,129 @@
import {
Button,
Drawer,
DrawerBody,
DrawerCloseButton,
DrawerContent,
DrawerHeader,
useDisclosure,
} from '@chakra-ui/react';
import React from 'react';
import { useHotkeys } from 'react-hotkeys-hook';
import { MdPhotoLibrary } from 'react-icons/md';
import { requestImages } from '../../app/socketio/actions';
import { RootState, useAppDispatch } from '../../app/store';
import { useAppSelector } from '../../app/store';
import { selectNextImage, selectPrevImage } from './gallerySlice';
import HoverableImage from './HoverableImage';
/**
* Simple image gallery.
*/
const ImageGalleryOld = () => {
const { images, currentImageUuid, areMoreImagesAvailable } = useAppSelector(
(state: RootState) => state.gallery
);
const dispatch = useAppDispatch();
const { isOpen, onOpen, onClose } = useDisclosure();
/**
* I don't like that this needs to rerender whenever the current image is changed.
* What if we have a large number of images? I suppose pagination (planned) will
* mitigate this issue.
*
* TODO: Refactor if performance complaints, or after migrating to new API which supports pagination.
*/
const handleClickLoadMore = () => {
dispatch(requestImages());
};
useHotkeys(
'g',
() => {
if (isOpen) {
onClose();
} else {
onOpen();
}
},
[isOpen]
);
useHotkeys(
'left',
() => {
dispatch(selectPrevImage());
},
[]
);
useHotkeys(
'right',
() => {
dispatch(selectNextImage());
},
[]
);
return (
<div className="image-gallery-area">
<Button
colorScheme="teal"
onClick={onOpen}
className="image-gallery-popup-btn"
>
<MdPhotoLibrary />
</Button>
<Drawer
isOpen={isOpen}
placement="right"
onClose={onClose}
autoFocus={false}
trapFocus={false}
closeOnOverlayClick={false}
>
<DrawerContent className="image-gallery-popup">
<div className="image-gallery-header">
<DrawerHeader>Your Invocations</DrawerHeader>
<DrawerCloseButton />
</div>
<DrawerBody className="image-gallery-body">
<div className="image-gallery-container">
{images.length ? (
<div className="image-gallery">
{images.map((image) => {
const { uuid } = image;
const isSelected = currentImageUuid === uuid;
return (
<HoverableImage
key={uuid}
image={image}
isSelected={isSelected}
/>
);
})}
</div>
) : (
<div className="image-gallery-container-placeholder">
<MdPhotoLibrary />
<p>No Images In Gallery</p>
</div>
)}
<Button
onClick={handleClickLoadMore}
isDisabled={!areMoreImagesAvailable}
className="image-gallery-load-more-btn"
>
{areMoreImagesAvailable ? 'Load More' : 'All Images Loaded'}
</Button>
</div>
</DrawerBody>
</DrawerContent>
</Drawer>
</div>
);
};
export default ImageGalleryOld;

View File

@ -0,0 +1,20 @@
@use '../../../styles/Mixins/' as *;
.image-metadata-viewer {
width: 100%;
border-radius: 0.5rem;
padding: 1rem;
background-color: var(--metadata-bg-color);
overflow: scroll;
max-height: $app-metadata-height;
z-index: 10;
}
.image-json-viewer {
border-radius: 0.5rem;
margin: 0 0.5rem 1rem 0.5rem;
padding: 1rem;
overflow-x: scroll;
word-break: break-all;
background-color: var(--metadata-json-bg-color);
}

View File

@ -0,0 +1,406 @@
import {
Center,
Flex,
Heading,
IconButton,
Link,
Text,
Tooltip,
} from '@chakra-ui/react';
import { ExternalLinkIcon } from '@chakra-ui/icons';
import { memo } from 'react';
import { IoArrowUndoCircleOutline } from 'react-icons/io5';
import { useAppDispatch } from '../../../app/store';
import * as InvokeAI from '../../../app/invokeai';
import {
setCfgScale,
setFacetoolStrength,
setCodeformerFidelity,
setFacetoolType,
setHeight,
setHiresFix,
setImg2imgStrength,
setInitialImagePath,
setMaskPath,
setPrompt,
setSampler,
setSeamless,
setSeed,
setSeedWeights,
setShouldFitToWidthHeight,
setSteps,
setUpscalingLevel,
setUpscalingStrength,
setWidth,
} from '../../options/optionsSlice';
import promptToString from '../../../common/util/promptToString';
import { seedWeightsToString } from '../../../common/util/seedWeightPairs';
import { FaCopy } from 'react-icons/fa';
type MetadataItemProps = {
isLink?: boolean;
label: string;
onClick?: () => void;
value: number | string | boolean;
labelPosition?: string;
};
/**
* Component to display an individual metadata item or parameter.
*/
const MetadataItem = ({
label,
value,
onClick,
isLink,
labelPosition,
}: MetadataItemProps) => {
return (
<Flex gap={2}>
{onClick && (
<Tooltip label={`Recall ${label}`}>
<IconButton
aria-label="Use this parameter"
icon={<IoArrowUndoCircleOutline />}
size={'xs'}
variant={'ghost'}
fontSize={20}
onClick={onClick}
/>
</Tooltip>
)}
<Flex direction={labelPosition ? 'column' : 'row'}>
<Text fontWeight={'semibold'} whiteSpace={'pre-wrap'} pr={2}>
{label}:
</Text>
{isLink ? (
<Link href={value.toString()} isExternal wordBreak={'break-all'}>
{value.toString()} <ExternalLinkIcon mx="2px" />
</Link>
) : (
<Text overflowY={'scroll'} wordBreak={'break-all'}>
{value.toString()}
</Text>
)}
</Flex>
</Flex>
);
};
type ImageMetadataViewerProps = {
image: InvokeAI.Image;
styleClass?: string;
};
// TODO: I don't know if this is needed.
const memoEqualityCheck = (
prev: ImageMetadataViewerProps,
next: ImageMetadataViewerProps
) => prev.image.uuid === next.image.uuid;
// TODO: Show more interesting information in this component.
/**
* Image metadata viewer overlays currently selected image and provides
* access to any of its metadata for use in processing.
*/
const ImageMetadataViewer = memo(
({ image, styleClass }: ImageMetadataViewerProps) => {
const dispatch = useAppDispatch();
// const jsonBgColor = useColorModeValue('blackAlpha.100', 'whiteAlpha.100');
const metadata = image?.metadata?.image || {};
const {
type,
postprocessing,
sampler,
prompt,
seed,
variations,
steps,
cfg_scale,
seamless,
hires_fix,
width,
height,
strength,
fit,
init_image_path,
mask_image_path,
orig_path,
scale,
} = metadata;
const metadataJSON = JSON.stringify(metadata, null, 2);
return (
<div className={`image-metadata-viewer ${styleClass}`}>
<Flex gap={1} direction={'column'} width={'100%'}>
<Flex gap={2}>
<Text fontWeight={'semibold'}>File:</Text>
<Link href={image.url} isExternal>
{image.url}
<ExternalLinkIcon mx="2px" />
</Link>
</Flex>
{Object.keys(metadata).length > 0 ? (
<>
{type && <MetadataItem label="Generation type" value={type} />}
{['esrgan', 'gfpgan'].includes(type) && (
<MetadataItem label="Original image" value={orig_path} />
)}
{type === 'gfpgan' && strength !== undefined && (
<MetadataItem
label="Fix faces strength"
value={strength}
onClick={() => dispatch(setFacetoolStrength(strength))}
/>
)}
{type === 'esrgan' && scale !== undefined && (
<MetadataItem
label="Upscaling scale"
value={scale}
onClick={() => dispatch(setUpscalingLevel(scale))}
/>
)}
{type === 'esrgan' && strength !== undefined && (
<MetadataItem
label="Upscaling strength"
value={strength}
onClick={() => dispatch(setUpscalingStrength(strength))}
/>
)}
{prompt && (
<MetadataItem
label="Prompt"
labelPosition="top"
value={promptToString(prompt)}
onClick={() => dispatch(setPrompt(prompt))}
/>
)}
{seed !== undefined && (
<MetadataItem
label="Seed"
value={seed}
onClick={() => dispatch(setSeed(seed))}
/>
)}
{sampler && (
<MetadataItem
label="Sampler"
value={sampler}
onClick={() => dispatch(setSampler(sampler))}
/>
)}
{steps && (
<MetadataItem
label="Steps"
value={steps}
onClick={() => dispatch(setSteps(steps))}
/>
)}
{cfg_scale !== undefined && (
<MetadataItem
label="CFG scale"
value={cfg_scale}
onClick={() => dispatch(setCfgScale(cfg_scale))}
/>
)}
{variations && variations.length > 0 && (
<MetadataItem
label="Seed-weight pairs"
value={seedWeightsToString(variations)}
onClick={() =>
dispatch(setSeedWeights(seedWeightsToString(variations)))
}
/>
)}
{seamless && (
<MetadataItem
label="Seamless"
value={seamless}
onClick={() => dispatch(setSeamless(seamless))}
/>
)}
{hires_fix && (
<MetadataItem
label="High Resolution Optimization"
value={hires_fix}
onClick={() => dispatch(setHiresFix(hires_fix))}
/>
)}
{width && (
<MetadataItem
label="Width"
value={width}
onClick={() => dispatch(setWidth(width))}
/>
)}
{height && (
<MetadataItem
label="Height"
value={height}
onClick={() => dispatch(setHeight(height))}
/>
)}
{init_image_path && (
<MetadataItem
label="Initial image"
value={init_image_path}
isLink
onClick={() => dispatch(setInitialImagePath(init_image_path))}
/>
)}
{mask_image_path && (
<MetadataItem
label="Mask image"
value={mask_image_path}
isLink
onClick={() => dispatch(setMaskPath(mask_image_path))}
/>
)}
{type === 'img2img' && strength && (
<MetadataItem
label="Image to image strength"
value={strength}
onClick={() => dispatch(setImg2imgStrength(strength))}
/>
)}
{fit && (
<MetadataItem
label="Image to image fit"
value={fit}
onClick={() => dispatch(setShouldFitToWidthHeight(fit))}
/>
)}
{postprocessing && postprocessing.length > 0 && (
<>
<Heading size={'sm'}>Postprocessing</Heading>
{postprocessing.map(
(
postprocess: InvokeAI.PostProcessedImageMetadata,
i: number
) => {
if (postprocess.type === 'esrgan') {
const { scale, strength } = postprocess;
return (
<Flex
key={i}
pl={'2rem'}
gap={1}
direction={'column'}
>
<Text size={'md'}>{`${
i + 1
}: Upscale (ESRGAN)`}</Text>
<MetadataItem
label="Scale"
value={scale}
onClick={() => dispatch(setUpscalingLevel(scale))}
/>
<MetadataItem
label="Strength"
value={strength}
onClick={() =>
dispatch(setUpscalingStrength(strength))
}
/>
</Flex>
);
} else if (postprocess.type === 'gfpgan') {
const { strength } = postprocess;
return (
<Flex
key={i}
pl={'2rem'}
gap={1}
direction={'column'}
>
<Text size={'md'}>{`${
i + 1
}: Face restoration (GFPGAN)`}</Text>
<MetadataItem
label="Strength"
value={strength}
onClick={() => {
dispatch(setFacetoolStrength(strength));
dispatch(setFacetoolType('gfpgan'));
}}
/>
</Flex>
);
} else if (postprocess.type === 'codeformer') {
const { strength, fidelity } = postprocess;
return (
<Flex
key={i}
pl={'2rem'}
gap={1}
direction={'column'}
>
<Text size={'md'}>{`${
i + 1
}: Face restoration (Codeformer)`}</Text>
<MetadataItem
label="Strength"
value={strength}
onClick={() => {
dispatch(setFacetoolStrength(strength));
dispatch(setFacetoolType('codeformer'));
}}
/>
{fidelity && (
<MetadataItem
label="Fidelity"
value={fidelity}
onClick={() => {
dispatch(setCodeformerFidelity(fidelity));
dispatch(setFacetoolType('codeformer'));
}}
/>
)}
</Flex>
);
}
}
)}
</>
)}
<Flex gap={2} direction={'column'}>
<Flex gap={2}>
<Tooltip label={`Copy metadata JSON`}>
<IconButton
aria-label="Copy metadata JSON"
icon={<FaCopy />}
size={'xs'}
variant={'ghost'}
fontSize={14}
onClick={() =>
navigator.clipboard.writeText(metadataJSON)
}
/>
</Tooltip>
<Text fontWeight={'semibold'}>Metadata JSON:</Text>
</Flex>
<div className={'image-json-viewer'}>
<pre>{metadataJSON}</pre>
</div>
</Flex>
</>
) : (
<Center width={'100%'} pt={10}>
<Text fontSize={'lg'} fontWeight="semibold">
No metadata available
</Text>
</Center>
)}
</Flex>
</div>
);
},
memoEqualityCheck
);
export default ImageMetadataViewer;
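Every row in the metadata viewer follows the same recall pattern: show the stored value and, when an onClick is provided, render a small button that dispatches the value back into the generation options. A minimal sketch of that idea using plain HTML elements rather than Chakra components; RecallRow and its props are hypothetical names, not the file's API.

type RecallRowProps = {
  label: string;
  value: string | number | boolean;
  onRecall?: () => void; // dispatches the value back into options state
};

function RecallRow({ label, value, onRecall }: RecallRowProps) {
  return (
    <div>
      {onRecall && (
        <button aria-label={`Recall ${label}`} onClick={onRecall}>
          ↩
        </button>
      )}
      <strong>{label}: </strong>
      <span>{value.toString()}</span>
    </div>
  );
}

// Assumed usage: <RecallRow label="Seed" value={seed} onRecall={() => dispatch(setSeed(seed))} />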

View File

@ -1,338 +0,0 @@
import {
Center,
Flex,
Heading,
IconButton,
Link,
Text,
Tooltip,
} from '@chakra-ui/react';
import { ExternalLinkIcon } from '@chakra-ui/icons';
import { memo } from 'react';
import { IoArrowUndoCircleOutline } from 'react-icons/io5';
import { useAppDispatch } from '../../app/store';
import * as InvokeAI from '../../app/invokeai';
import {
setCfgScale,
setGfpganStrength,
setHeight,
setImg2imgStrength,
setInitialImagePath,
setMaskPath,
setPrompt,
setSampler,
setSeed,
setSeedWeights,
setShouldFitToWidthHeight,
setSteps,
setUpscalingLevel,
setUpscalingStrength,
setWidth,
} from '../options/optionsSlice';
import promptToString from '../../common/util/promptToString';
import { seedWeightsToString } from '../../common/util/seedWeightPairs';
import { FaCopy } from 'react-icons/fa';
type MetadataItemProps = {
isLink?: boolean;
label: string;
onClick?: () => void;
value: number | string | boolean;
labelPosition?: string;
};
/**
* Component to display an individual metadata item or parameter.
*/
const MetadataItem = ({
label,
value,
onClick,
isLink,
labelPosition,
}: MetadataItemProps) => {
return (
<Flex gap={2}>
{onClick && (
<Tooltip label={`Recall ${label}`}>
<IconButton
aria-label="Use this parameter"
icon={<IoArrowUndoCircleOutline />}
size={'xs'}
variant={'ghost'}
fontSize={20}
onClick={onClick}
/>
</Tooltip>
)}
<Flex direction={labelPosition ? 'column' : 'row'}>
<Text fontWeight={'semibold'} whiteSpace={'nowrap'} pr={2}>
{label}:
</Text>
{isLink ? (
<Link href={value.toString()} isExternal wordBreak={'break-all'}>
{value.toString()} <ExternalLinkIcon mx="2px" />
</Link>
) : (
<Text overflowY={'scroll'} wordBreak={'break-all'}>
{value.toString()}
</Text>
)}
</Flex>
</Flex>
);
};
type ImageMetadataViewerProps = {
image: InvokeAI.Image;
};
// TODO: I don't know if this is needed.
const memoEqualityCheck = (
prev: ImageMetadataViewerProps,
next: ImageMetadataViewerProps
) => prev.image.uuid === next.image.uuid;
// TODO: Show more interesting information in this component.
/**
* Image metadata viewer overlays currently selected image and provides
* access to any of its metadata for use in processing.
*/
const ImageMetadataViewer = memo(({ image }: ImageMetadataViewerProps) => {
const dispatch = useAppDispatch();
// const jsonBgColor = useColorModeValue('blackAlpha.100', 'whiteAlpha.100');
const metadata = image?.metadata?.image || {};
const {
type,
postprocessing,
sampler,
prompt,
seed,
variations,
steps,
cfg_scale,
seamless,
width,
height,
strength,
fit,
init_image_path,
mask_image_path,
orig_path,
scale,
} = metadata;
const metadataJSON = JSON.stringify(metadata, null, 2);
return (
<Flex gap={1} direction={'column'} width={'100%'}>
<Flex gap={2}>
<Text fontWeight={'semibold'}>File:</Text>
<Link href={image.url} isExternal>
{image.url}
<ExternalLinkIcon mx="2px" />
</Link>
</Flex>
{Object.keys(metadata).length > 0 ? (
<>
{type && <MetadataItem label="Generation type" value={type} />}
{['esrgan', 'gfpgan'].includes(type) && (
<MetadataItem label="Original image" value={orig_path} />
)}
{type === 'gfpgan' && strength !== undefined && (
<MetadataItem
label="Fix faces strength"
value={strength}
onClick={() => dispatch(setGfpganStrength(strength))}
/>
)}
{type === 'esrgan' && scale !== undefined && (
<MetadataItem
label="Upscaling scale"
value={scale}
onClick={() => dispatch(setUpscalingLevel(scale))}
/>
)}
{type === 'esrgan' && strength !== undefined && (
<MetadataItem
label="Upscaling strength"
value={strength}
onClick={() => dispatch(setUpscalingStrength(strength))}
/>
)}
{prompt && (
<MetadataItem
label="Prompt"
labelPosition="top"
value={promptToString(prompt)}
onClick={() => dispatch(setPrompt(prompt))}
/>
)}
{seed !== undefined && (
<MetadataItem
label="Seed"
value={seed}
onClick={() => dispatch(setSeed(seed))}
/>
)}
{sampler && (
<MetadataItem
label="Sampler"
value={sampler}
onClick={() => dispatch(setSampler(sampler))}
/>
)}
{steps && (
<MetadataItem
label="Steps"
value={steps}
onClick={() => dispatch(setSteps(steps))}
/>
)}
{cfg_scale !== undefined && (
<MetadataItem
label="CFG scale"
value={cfg_scale}
onClick={() => dispatch(setCfgScale(cfg_scale))}
/>
)}
{variations && variations.length > 0 && (
<MetadataItem
label="Seed-weight pairs"
value={seedWeightsToString(variations)}
onClick={() =>
dispatch(setSeedWeights(seedWeightsToString(variations)))
}
/>
)}
{seamless && (
<MetadataItem
label="Seamless"
value={seamless}
onClick={() => dispatch(setWidth(seamless))}
/>
)}
{width && (
<MetadataItem
label="Width"
value={width}
onClick={() => dispatch(setWidth(width))}
/>
)}
{height && (
<MetadataItem
label="Height"
value={height}
onClick={() => dispatch(setHeight(height))}
/>
)}
{init_image_path && (
<MetadataItem
label="Initial image"
value={init_image_path}
isLink
onClick={() => dispatch(setInitialImagePath(init_image_path))}
/>
)}
{mask_image_path && (
<MetadataItem
label="Mask image"
value={mask_image_path}
isLink
onClick={() => dispatch(setMaskPath(mask_image_path))}
/>
)}
{type === 'img2img' && strength && (
<MetadataItem
label="Image to image strength"
value={strength}
onClick={() => dispatch(setImg2imgStrength(strength))}
/>
)}
{fit && (
<MetadataItem
label="Image to image fit"
value={fit}
onClick={() => dispatch(setShouldFitToWidthHeight(fit))}
/>
)}
{postprocessing && postprocessing.length > 0 && (
<>
<Heading size={'sm'}>Postprocessing</Heading>
{postprocessing.map(
(
postprocess: InvokeAI.PostProcessedImageMetadata,
i: number
) => {
if (postprocess.type === 'esrgan') {
const { scale, strength } = postprocess;
return (
<Flex key={i} pl={'2rem'} gap={1} direction={'column'}>
<Text size={'md'}>{`${i + 1}: Upscale (ESRGAN)`}</Text>
<MetadataItem
label="Scale"
value={scale}
onClick={() => dispatch(setUpscalingLevel(scale))}
/>
<MetadataItem
label="Strength"
value={strength}
onClick={() =>
dispatch(setUpscalingStrength(strength))
}
/>
</Flex>
);
} else if (postprocess.type === 'gfpgan') {
const { strength } = postprocess;
return (
<Flex key={i} pl={'2rem'} gap={1} direction={'column'}>
<Text size={'md'}>{`${
i + 1
}: Face restoration (GFPGAN)`}</Text>
<MetadataItem
label="Strength"
value={strength}
onClick={() => dispatch(setGfpganStrength(strength))}
/>
</Flex>
);
}
}
)}
</>
)}
<Flex gap={2} direction={'column'}>
<Flex gap={2}>
<Tooltip label={`Copy metadata JSON`}>
<IconButton
aria-label="Copy metadata JSON"
icon={<FaCopy />}
size={'xs'}
variant={'ghost'}
fontSize={14}
onClick={() => navigator.clipboard.writeText(metadataJSON)}
/>
</Tooltip>
<Text fontWeight={'semibold'}>Metadata JSON:</Text>
</Flex>
<div className={'current-image-json-viewer'}>
<pre>{metadataJSON}</pre>
</div>
</Flex>
</>
) : (
<Center width={'100%'} pt={10}>
<Text fontSize={'lg'} fontWeight="semibold">
No metadata available
</Text>
</Center>
)}
</Flex>
);
}, memoEqualityCheck);
export default ImageMetadataViewer;

View File

@ -15,9 +15,9 @@
background: var(--tab-panel-bg);
border-radius: 0 0 0.4rem 0.4rem;
border: 2px solid var(--tab-hover-color);
padding: .75rem 1rem .75rem 1rem;
padding: 0.75rem 1rem 0.75rem 1rem;
display: grid;
grid-template-rows: repeat(auto-fill, 1fr);
grid-auto-rows: max-content;
grid-row-gap: 0.5rem;
justify-content: space-between;
}

View File

@ -1,6 +1,6 @@
import { createSlice } from '@reduxjs/toolkit';
import type { PayloadAction } from '@reduxjs/toolkit';
import { clamp } from 'lodash';
import _, { clamp } from 'lodash';
import * as InvokeAI from '../../app/invokeai';
export interface GalleryState {
@ -72,7 +72,13 @@ export const gallerySlice = createSlice({
},
addImage: (state, action: PayloadAction<InvokeAI.Image>) => {
const newImage = action.payload;
const { uuid, mtime } = newImage;
const { uuid, url, mtime } = newImage;
// Do not add duplicate images
if (state.images.find((i) => i.url === url && i.mtime === mtime)) {
return;
}
state.images.unshift(newImage);
state.currentImageUuid = uuid;
state.intermediateImage = undefined;
@ -85,6 +91,32 @@ export const gallerySlice = createSlice({
clearIntermediateImage: (state) => {
state.intermediateImage = undefined;
},
selectNextImage: (state) => {
const { images, currentImage } = state;
if (currentImage) {
const currentImageIndex = images.findIndex(
(i) => i.uuid === currentImage.uuid
);
if (_.inRange(currentImageIndex, 0, images.length - 1)) {
const newCurrentImage = images[currentImageIndex + 1];
state.currentImage = newCurrentImage;
state.currentImageUuid = newCurrentImage.uuid;
}
}
},
selectPrevImage: (state) => {
const { images, currentImage } = state;
if (currentImage) {
const currentImageIndex = images.findIndex(
(i) => i.uuid === currentImage.uuid
);
if (_.inRange(currentImageIndex, 1, images.length + 1)) {
const newCurrentImage = images[currentImageIndex - 1];
state.currentImage = newCurrentImage;
state.currentImageUuid = newCurrentImage.uuid;
}
}
},
addGalleryImages: (
state,
action: PayloadAction<{
@ -94,8 +126,15 @@ export const gallerySlice = createSlice({
) => {
const { images, areMoreImagesAvailable } = action.payload;
if (images.length > 0) {
// Filter images that already exist in the gallery
const newImages = images.filter(
(newImage) =>
!state.images.find(
(i) => i.url === newImage.url && i.mtime === newImage.mtime
)
);
state.images = state.images
.concat(images)
.concat(newImages)
.sort((a, b) => b.mtime - a.mtime);
if (!state.currentImage) {
@ -122,6 +161,8 @@ export const {
setCurrentImage,
addGalleryImages,
setIntermediateImage,
selectNextImage,
selectPrevImage,
} = gallerySlice.actions;
export default gallerySlice.reducer;
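The new arrow-key navigation walks the gallery array, which is kept sorted newest-first, so selectNextImage steps toward older images and selectPrevImage toward newer ones; lodash's inRange(x, start, end) is a half-open check (start <= x < end), which is what guards the array bounds. A reducer-free sketch of the same logic (the GalleryImage type and function names are illustrative):

import _ from 'lodash';

type GalleryImage = { uuid: string; mtime: number };

// "next" = one step toward older images (the array is newest-first)
function nextImage(images: GalleryImage[], currentUuid: string): GalleryImage | undefined {
  const index = images.findIndex((i) => i.uuid === currentUuid);
  // only valid if an image exists after the current one
  return _.inRange(index, 0, images.length - 1) ? images[index + 1] : undefined;
}

// "prev" = one step toward newer images
function prevImage(images: GalleryImage[], currentUuid: string): GalleryImage | undefined {
  const index = images.findIndex((i) => i.uuid === currentUuid);
  // only valid if an image exists before the current one
  return _.inRange(index, 1, images.length) ? images[index - 1] : undefined;
}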

View File

@ -8,7 +8,7 @@ import React, { ReactElement } from 'react';
import { Feature } from '../../../app/features';
import GuideIcon from '../../../common/components/GuideIcon';
interface InvokeAccordionItemProps {
export interface InvokeAccordionItemProps {
header: ReactElement;
feature: Feature;
options: ReactElement;

Some files were not shown because too many files have changed in this diff.