Merge branch 'development' into model-switching
.github/CODEOWNERS (vendored, new file, 4 additions)
@@ -0,0 +1,4 @@
ldm/invoke/pngwriter.py @CapableWeb
ldm/invoke/server_legacy.py @CapableWeb
scripts/legacy_api.py @CapableWeb
tests/legacy_tests.sh @CapableWeb
.github/workflows/mkdocs-flow.yml (vendored, 6 changes)
@@ -3,9 +3,9 @@ on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
  # pull_request:
  #   branches:
  #     - main
jobs:
  build:
    name: Deploy docs to GitHub Pages
LICENSE (2 changes)
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)
Copyright (c) 2022 Lincoln Stein and InvokeAI Organization

This software is derived from a fork of the source code available from
https://github.com/pesser/stable-diffusion and
README.md (77 changes)
@@ -2,14 +2,7 @@

# InvokeAI: A Stable Diffusion Toolkit

_Note: This fork is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to
report bugs and make feature requests. Be sure to use the provided
templates. They will help aid diagnose issues faster._

_This repository was formally known as lstein/stable-diffusion_

# **Table of Contents**
_Formerly known as lstein/stable-diffusion_

![project logo](docs/assets/logo.png)

@@ -46,8 +39,13 @@ This is a fork of
the open source text-to-image generator. It provides a streamlined
process with various new features and options to aid the image
generation process. It runs on Windows, Mac and Linux machines, with
GPU cards with as little as 4 GB or RAM. It provides both a polished
Web interface, and an easy-to-use command-line interface.
GPU cards with as little as 4 GB of RAM. It provides both a polished
Web interface (see below), and an easy-to-use command-line interface.

**Quick links**: [<a href="https://discord.gg/NwVCmKwY">Discord Server</a>] [<a href="https://invoke-ai.github.io/InvokeAI/">Documentation and Tutorials</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]

<div align="center"><img src="docs/assets/invoke-web-server-1.png" width=640></div>

_Note: This fork is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature

@@ -91,7 +89,7 @@ You will need one of the following:

#### Disk

- At least 6 GB of free disk space for the machine learning model, Python, and all its dependencies.
- At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.

**Note**

@@ -136,39 +134,38 @@ you can try starting `invoke.py` with the `--precision=float32` flag:

### Latest Changes

- vNEXT (TODO 2022)
- v2.0.1 (13 October 2022)
  - fix noisy images at high step count when using k* samplers
  - dream.py script now calls invoke.py module directly rather than
    via a new python process (which could break the environment)

- v2.0.0 (9 October 2022)

  - `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
    for backward compatibility.
  - Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
  - Support for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/INPAINTING.md">inpainting</a> and <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OUTPAINTING.md">outpainting</a>
  - img2img runs on all k* samplers
  - Support for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/PROMPTS.md#negative-and-unconditioned-prompts">negative prompts</a>
  - Support for CodeFormer face reconstruction
  - Support for Textual Inversion on Macintoshes
  - Support in both WebGUI and CLI for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/POSTPROCESS.md">post-processing of previously-generated images</a>
    using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
    and "embiggen" upscaling. See the `!fix` command.
  - New `--hires` option on `invoke>` line allows <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/CLI.md#this-is-an-example-of-txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
  - New `--perlin` and `--threshold` options allow you to add and control variation
    during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>
  - Extensive metadata now written into PNG files, allowing reliable regeneration of images
    and tweaking of previous settings.
  - Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
  - Improved <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/CLI.md">command-line completion behavior</a>.
    New commands added:
    * List command-line history with `!history`
    * Search command-line history with `!search`
    * Clear history with `!clear`
  - Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
    configure. To switch away from auto use the new flag like `--precision=float32`.

- v1.14 (11 September 2022)

  - Memory optimizations for small-RAM cards. 512x512 now possible on 4 GB GPUs.
  - Full support for Apple hardware with M1 or M2 chips.
  - Add "seamless mode" for circular tiling of image. Generates beautiful effects.
    ([prixt](https://github.com/prixt)).
  - Inpainting support.
  - Improved web server GUI.
  - Lots of code and documentation cleanups.

- v1.13 (3 September 2022)

  - Support image variations (see [VARIATIONS](docs/features/VARIATIONS.md)
    ([Kevin Gibbons](https://github.com/bakkot) and many contributors and reviewers)
  - Supports a Google Colab notebook for a standalone server running on Google hardware
    [Arturo Mendivil](https://github.com/artmen1516)
  - WebUI supports GFPGAN/ESRGAN facial reconstruction and upscaling
    [Kevin Gibbons](https://github.com/bakkot)
  - WebUI supports incremental display of in-progress images during generation
    [Kevin Gibbons](https://github.com/bakkot)
  - A new configuration file scheme that allows new models (including upcoming
    stable-diffusion-v1.5) to be added without altering the code.
    ([David Wager](https://github.com/maddavid12))
  - Can specify --grid on invoke.py command line as the default.
  - Miscellaneous internal bug and stability fixes.
  - Works on M1 Apple hardware.
  - Multiple bug fixes.

For older changelogs, please visit the **[CHANGELOG](docs/features/CHANGELOG.md)**.

### Troubleshooting
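The README hunk above mentions that `--full_precision` / `-F` is deprecated in favor of an automatic precision choice, overridable with `--precision=float32`. A hypothetical sketch of that kind of auto-selection (not the actual `invoke.py` logic; shown only to illustrate the behavior the text describes):

```python
# Hypothetical sketch, not the real invoke.py code: pick full precision where
# half precision is known to be problematic (CPU, Apple M1/M2 "mps", and some
# older video cards), and let an explicit --precision value override "auto".
def choose_precision(device_type: str, requested: str = "auto") -> str:
    if requested != "auto":
        return requested                 # e.g. --precision=float32
    if device_type in ("cpu", "mps"):    # needs full precision per the README
        return "float32"
    return "float16"                     # most CUDA cards can run half precision
```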
@@ -257,14 +257,14 @@ class InvokeAIWebServer:

        @socketio.on('generateImage')
        def handle_generate_image_event(
            generation_parameters, esrgan_parameters, gfpgan_parameters
            generation_parameters, esrgan_parameters, facetool_parameters
        ):
            try:
                print(
                    f'>> Image generation requested: {generation_parameters}\nESRGAN parameters: {esrgan_parameters}\nGFPGAN parameters: {gfpgan_parameters}'
                    f'>> Image generation requested: {generation_parameters}\nESRGAN parameters: {esrgan_parameters}\nFacetool parameters: {facetool_parameters}'
                )
                self.generate_images(
                    generation_parameters, esrgan_parameters, gfpgan_parameters
                    generation_parameters, esrgan_parameters, facetool_parameters
                )
            except Exception as e:
                self.socketio.emit('error', {'message': (str(e))})
@@ -300,9 +300,11 @@ class InvokeAIWebServer:
            )

            if postprocessing_parameters['type'] == 'esrgan':
                progress.set_current_status('Upscaling')
                progress.set_current_status('Upscaling (ESRGAN)')
            elif postprocessing_parameters['type'] == 'gfpgan':
                progress.set_current_status('Restoring Faces')
                progress.set_current_status('Restoring Faces (GFPGAN)')
            elif postprocessing_parameters['type'] == 'codeformer':
                progress.set_current_status('Restoring Faces (Codeformer)')

            socketio.emit('progressUpdate', progress.to_formatted_dict())
            eventlet.sleep(0)
@@ -322,6 +324,14 @@ class InvokeAIWebServer:
                    strength=postprocessing_parameters['facetool_strength'],
                    seed=seed,
                )
            elif postprocessing_parameters['type'] == 'codeformer':
                image = self.codeformer.process(
                    image=image,
                    strength=postprocessing_parameters['facetool_strength'],
                    fidelity=postprocessing_parameters['codeformer_fidelity'],
                    seed=seed,
                    device='cpu' if str(self.generate.device) == 'mps' else self.generate.device
                )
            else:
                raise TypeError(
                    f'{postprocessing_parameters["type"]} is not a valid postprocessing type'
@@ -448,7 +458,7 @@ class InvokeAIWebServer:
        }

    def generate_images(
        self, generation_parameters, esrgan_parameters, gfpgan_parameters
        self, generation_parameters, esrgan_parameters, facetool_parameters
    ):
        try:
            self.canceled.clear()
@@ -551,7 +561,7 @@ class InvokeAIWebServer:

            nonlocal generation_parameters
            nonlocal esrgan_parameters
            nonlocal gfpgan_parameters
            nonlocal facetool_parameters
            nonlocal progress

            step_index = 1
@@ -611,23 +621,41 @@ class InvokeAIWebServer:
                if self.canceled.is_set():
                    raise CanceledException

                if gfpgan_parameters:
                    progress.set_current_status('Restoring Faces')
                if facetool_parameters:
                    if facetool_parameters['type'] == 'gfpgan':
                        progress.set_current_status('Restoring Faces (GFPGAN)')
                    elif facetool_parameters['type'] == 'codeformer':
                        progress.set_current_status('Restoring Faces (Codeformer)')

                    progress.set_current_status_has_steps(False)
                    self.socketio.emit(
                        'progressUpdate', progress.to_formatted_dict()
                    )
                    eventlet.sleep(0)

                    image = self.gfpgan.process(
                        image=image,
                        strength=gfpgan_parameters['strength'],
                        seed=seed,
                    )
                    if facetool_parameters['type'] == 'gfpgan':
                        image = self.gfpgan.process(
                            image=image,
                            strength=facetool_parameters['strength'],
                            seed=seed,
                        )
                    elif facetool_parameters['type'] == 'codeformer':
                        image = self.codeformer.process(
                            image=image,
                            strength=facetool_parameters['strength'],
                            fidelity=facetool_parameters['codeformer_fidelity'],
                            seed=seed,
                            device='cpu' if str(self.generate.device) == 'mps' else self.generate.device,
                        )
                        all_parameters['codeformer_fidelity'] = facetool_parameters['codeformer_fidelity']

                    postprocessing = True
                    all_parameters['facetool_strength'] = gfpgan_parameters[
                    all_parameters['facetool_strength'] = facetool_parameters[
                        'strength'
                    ]
                    all_parameters['facetool_type'] = facetool_parameters[
                        'type'
                    ]

                progress.set_current_status('Saving Image')
                self.socketio.emit(
@@ -737,13 +765,15 @@ class InvokeAIWebServer:

            # 'postprocessing' is either null or an
            if 'facetool_strength' in parameters:

                postprocessing.append(
                    {
                        'type': 'gfpgan',
                facetool_parameters = {
                    'type': str(parameters['facetool_type']),
                    'strength': float(parameters['facetool_strength']),
                }
                )

                if parameters['facetool_type'] == 'codeformer':
                    facetool_parameters['fidelity'] = float(parameters['codeformer_fidelity'])

                postprocessing.append(facetool_parameters)

            if 'upscale' in parameters:
                postprocessing.append(
@@ -762,7 +792,7 @@ class InvokeAIWebServer:
        rfc_dict['sampler'] = parameters['sampler_name']

        # display weighted subprompts (liable to change)
        subprompts = split_weighted_subprompts(parameters['prompt'])
        subprompts = split_weighted_subprompts(parameters['prompt'], skip_normalize=True)
        subprompts = [{'prompt': x[0], 'weight': x[1]} for x in subprompts]
        rfc_dict['prompt'] = subprompts

@@ -840,6 +870,13 @@ class InvokeAIWebServer:
                postprocessing_metadata['strength'] = parameters[
                    'facetool_strength'
                ]
            elif parameters['type'] == 'codeformer':
                postprocessing_metadata['type'] = 'codeformer'
                postprocessing_metadata['strength'] = parameters[
                    'facetool_strength'
                ]
                postprocessing_metadata['fidelity'] = parameters['codeformer_fidelity']

            else:
                raise TypeError(f"Invalid type: {parameters['type']}")
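The hunks above replace the single `gfpgan_parameters` argument with a generic `facetool_parameters` dict and branch on its `type` field at each call site. A minimal sketch of that dispatch pattern (not part of this diff; the helper name and the surrounding objects are assumptions, with the `.process()` calls mirroring those shown above):

```python
# Illustrative sketch only: dispatch a face-restoration request on its 'type'
# field, mirroring the branching added in the diff above. The processor
# objects and their .process() signatures are taken from the hunks shown here.
def run_facetool(image, facetool_parameters, gfpgan, codeformer, seed, device):
    kind = facetool_parameters['type']
    if kind == 'gfpgan':
        return gfpgan.process(
            image=image,
            strength=facetool_parameters['strength'],
            seed=seed,
        )
    if kind == 'codeformer':
        return codeformer.process(
            image=image,
            strength=facetool_parameters['strength'],
            fidelity=facetool_parameters['codeformer_fidelity'],
            seed=seed,
            # CodeFormer falls back to CPU on Apple MPS, as in the diff
            device='cpu' if str(device) == 'mps' else device,
        )
    raise TypeError(f'{kind} is not a valid postprocessing type')
```

Centralizing the branch in one helper would keep the web-socket handler free of per-tool details; the diff instead inlines the same logic at each call site.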
@@ -1,18 +1,73 @@
# **Changelog**
---
title: Changelog
---

## v1.13 (in process)
# :octicons-log-16: **Changelog**

- Supports a Google Colab notebook for a standalone server running on Google hardware [Arturo Mendivil](https://github.com/artmen1516)
- WebUI supports GFPGAN/ESRGAN facial reconstruction and upscaling [Kevin Gibbons](https://github.com/bakkot)
- WebUI supports incremental display of in-progress images during generation [Kevin Gibbons](https://github.com/bakkot)
- Output directory can be specified on the invoke> command line.
- The grid was displaying duplicated images when not enough images to fill the final row [Muhammad Usama](https://github.com/SMUsamaShah)
- Can specify --grid on invoke.py command line as the default.
- Miscellaneous internal bug and stability fixes.
## v2.0.1 (13 October 2022)

- fix noisy images at high step count when using k* samplers
- dream.py script now calls invoke.py module directly rather than
  via a new python process (which could break the environment)

## v2.0.0 <small>(9 October 2022)</small>

- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
  for backward compatibility.
- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
- Support for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/INPAINTING.md">inpainting</a> and <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OUTPAINTING.md">outpainting</a>
- img2img runs on all k* samplers
- Support for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/PROMPTS.md#negative-and-unconditioned-prompts">negative prompts</a>
- Support for CodeFormer face reconstruction
- Support for Textual Inversion on Macintoshes
- Support in both WebGUI and CLI for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/POSTPROCESS.md">post-processing of previously-generated images</a>
  using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
  and "embiggen" upscaling. See the `!fix` command.
- New `--hires` option on `invoke>` line allows <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/CLI.md#this-is-an-example-of-txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
- New `--perlin` and `--threshold` options allow you to add and control variation
  during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>
- Extensive metadata now written into PNG files, allowing reliable regeneration of images
  and tweaking of previous settings.
- Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
- Improved <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/CLI.md">command-line completion behavior</a>.
  New commands added:
  * List command-line history with `!history`
  * Search command-line history with `!search`
  * Clear history with `!clear`
- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
  configure. To switch away from auto use the new flag like `--precision=float32`.

## v1.14 <small>(11 September 2022)</small>

- Memory optimizations for small-RAM cards. 512x512 now possible on 4 GB GPUs.
- Full support for Apple hardware with M1 or M2 chips.
- Add "seamless mode" for circular tiling of image. Generates beautiful effects.
  ([prixt](https://github.com/prixt)).
- Inpainting support.
- Improved web server GUI.
- Lots of code and documentation cleanups.

## v1.13 <small>(3 September 2022)</small>

- Support image variations (see [VARIATIONS](features/VARIATIONS.md)
  ([Kevin Gibbons](https://github.com/bakkot) and many contributors and reviewers)
- Supports a Google Colab notebook for a standalone server running on Google hardware
  [Arturo Mendivil](https://github.com/artmen1516)
- WebUI supports GFPGAN/ESRGAN facial reconstruction and upscaling
  [Kevin Gibbons](https://github.com/bakkot)
- WebUI supports incremental display of in-progress images during generation
  [Kevin Gibbons](https://github.com/bakkot)
- A new configuration file scheme that allows new models (including upcoming
  stable-diffusion-v1.5) to be added without altering the code.
  ([David Wager](https://github.com/maddavid12))
- Can specify --grid on invoke.py command line as the default.
- Miscellaneous internal bug and stability fixes.
- Works on M1 Apple hardware.
- Multiple bug fixes.

---

## v1.12 (28 August 2022)
## v1.12 <small>(28 August 2022)</small>

- Improved file handling, including ability to read prompts from standard input.
  (kudos to [Yunsaki](https://github.com/yunsaki)
@@ -26,7 +81,7 @@

---

## v1.11 (26 August 2022)
## v1.11 <small>(26 August 2022)</small>

- NEW FEATURE: Support upscaling and face enhancement using the GFPGAN module. (kudos to [Oceanswave](https://github.com/Oceanswave)
- You now can specify a seed of -1 to use the previous image's seed, -2 to use the seed for the image generated before that, etc.
@@ -39,13 +94,13 @@

---

## v1.10 (25 August 2022)
## v1.10 <small>(25 August 2022)</small>

- A barebones but fully functional interactive web server for online generation of txt2img and img2img.

---

## v1.09 (24 August 2022)
## v1.09 <small>(24 August 2022)</small>

- A new -v option allows you to generate multiple variants of an initial image
  in img2img mode. (kudos to [Oceanswave](https://github.com/Oceanswave). [
@@ -55,7 +110,7 @@

---

## v1.08 (24 August 2022)
## v1.08 <small>(24 August 2022)</small>

- Escape single quotes on the invoke> command before trying to parse. This avoids
  parse errors.
@@ -66,7 +121,7 @@

---

## v1.07 (23 August 2022)
## v1.07 <small>(23 August 2022)</small>

- Image filenames will now never fill gaps in the sequence, but will be assigned the
  next higher name in the chosen directory. This ensures that the alphabetic and chronological
@@ -74,14 +129,14 @@

---

## v1.06 (23 August 2022)
## v1.06 <small>(23 August 2022)</small>

- Added weighted prompt support contributed by [xraxra](https://github.com/xraxra)
- Example of using weighted prompts to tweak a demonic figure contributed by [bmaltais](https://github.com/bmaltais)

---

## v1.05 (22 August 2022 - after the drop)
## v1.05 <small>(22 August 2022 - after the drop)</small>

- Filenames now use the following formats:
  000010.95183149.png -- Two files produced by the same command (e.g. -n2),
@@ -99,7 +154,7 @@

---

## v1.04 (22 August 2022 - after the drop)
## v1.04 <small>(22 August 2022 - after the drop)</small>

- Updated README to reflect installation of the released weights.
- Suppressed very noisy and inconsequential warning when loading the frozen CLIP
@@ -107,14 +162,14 @@

---

## v1.03 (22 August 2022)
## v1.03 <small>(22 August 2022)</small>

- The original txt2img and img2img scripts from the CompViz repository have been moved into
  a subfolder named "orig_scripts", to reduce confusion.

---

## v1.02 (21 August 2022)
## v1.02 <small>(21 August 2022)</small>

- A copy of the prompt and all of its switches and options is now stored in the corresponding
  image in a tEXt metadata field named "Dream". You can read the prompt using scripts/images2prompt.py,
@@ -123,7 +178,7 @@

---

## v1.01 (21 August 2022)
## v1.01 <small>(21 August 2022)</small>

- added k_lms sampling.
  **Please run "conda env update" to load the k_lms dependencies!!**
@@ -134,4 +189,4 @@

## Links

- **[Read Me](../readme.md)**
- **[Read Me](index.md)**
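The v1.02 entry above notes that each output image carries its generation prompt in a PNG tEXt field named "Dream", readable with `scripts/images2prompt.py`. A minimal sketch of reading that field directly with Pillow (not part of this diff; assumes Pillow is installed and the file was produced by this project, and the metadata layout may differ between versions):

```python
# Minimal sketch: read the "Dream" tEXt field that InvokeAI writes into its
# PNG outputs (see the v1.02 changelog entry above). Assumes Pillow is
# installed; non-PNG files simply return None.
from typing import Optional

from PIL import Image


def read_dream_prompt(path: str) -> Optional[str]:
    with Image.open(path) as img:
        text_chunks = getattr(img, 'text', {})  # present on PNG images only
        return text_chunks.get('Dream')


if __name__ == '__main__':
    # example filename taken from the v1.05 entry above
    print(read_dream_prompt('outputs/img-samples/000010.95183149.png'))
```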
BIN docs/assets/inpainting/000019.curly.hair.deselected.png (new file, 519 KiB)
BIN docs/assets/inpainting/000019.curly.hair.masked.png (new file, 11 KiB)
BIN docs/assets/inpainting/000019.curly.hair.selected.png (new file, 519 KiB)
BIN docs/assets/inpainting/000024.801380492.png (new file, 439 KiB)
BIN docs/assets/invoke-web-server-9.png (new file, 1.1 MiB)
BIN docs/assets/still-life-inpainted.png (new file, 338 KiB)
BIN docs/assets/still-life-scaled.jpg (new file, 59 KiB)
@ -34,7 +34,7 @@ The script is confirmed to work on Linux, Windows and Mac systems.
|
||||
currently rudimentary, but a much better replacement is on its way.
|
||||
|
||||
```bash
|
||||
(ldm) ~/stable-diffusion$ python3 ./scripts/invoke.py
|
||||
(invokeai) ~/stable-diffusion$ python3 ./scripts/invoke.py
|
||||
* Initializing, be patient...
|
||||
Loading model from models/ldm/text2img-large/model.ckpt
|
||||
(...more initialization messages...)
|
||||
@ -51,7 +51,7 @@ invoke> "there's a fly in my soup" -n6 -g
|
||||
invoke> q
|
||||
|
||||
# this shows how to retrieve the prompt stored in the saved image's metadata
|
||||
(ldm) ~/stable-diffusion$ python ./scripts/images2prompt.py outputs/img_samples/*.png
|
||||
(invokeai) ~/stable-diffusion$ python ./scripts/images2prompt.py outputs/img_samples/*.png
|
||||
00009.png: "ashley judd riding a camel" -s150 -S 416354203
|
||||
00010.png: "ashley judd riding a camel" -s150 -S 1362479620
|
||||
00011.png: "there's a fly in my soup" -n6 -g -S 2685670268
|
||||
@ -60,7 +60,7 @@ invoke> q
|
||||
![invoke-py-demo](../assets/dream-py-demo.png)
|
||||
|
||||
The `invoke>` prompt's arguments are pretty much identical to those used in the
|
||||
Discord bot, except you don't need to type "!invoke" (it doesn't hurt if you do).
|
||||
Discord bot, except you don't need to type `!invoke` (it doesn't hurt if you do).
|
||||
A significant change is that creation of individual images is now the default
|
||||
unless `--grid` (`-g`) is given. A full list is given in
|
||||
[List of prompt arguments](#list-of-prompt-arguments).
|
||||
@ -75,8 +75,7 @@ the location of the model weight files.
|
||||
|
||||
These command-line arguments can be passed to `invoke.py` when you first run it
|
||||
from the Windows, Mac or Linux command line. Some set defaults that can be
|
||||
overridden on a per-prompt basis (see [List of prompt arguments]
|
||||
(#list-of-prompt-arguments). Others
|
||||
overridden on a per-prompt basis (see [List of prompt arguments](#list-of-prompt-arguments). Others
|
||||
|
||||
| Argument <img width="240" align="right"/> | Shortcut <img width="100" align="right"/> | Default <img width="320" align="right"/> | Description |
|
||||
| ----------------------------------------- | ----------------------------------------- | ---------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
|
||||
@ -86,6 +85,7 @@ overridden on a per-prompt basis (see [List of prompt arguments]
|
||||
| `--from_file <path>` | | `None` | Read list of prompts from a file. Use `-` to read from standard input |
|
||||
| `--model <modelname>` | | `stable-diffusion-1.4` | Loads model specified in configs/models.yaml. Currently one of "stable-diffusion-1.4" or "laion400m" |
|
||||
| `--full_precision` | `-F` | `False` | Run in slower full-precision mode. Needed for Macintosh M1/M2 hardware and some older video cards. |
|
||||
| `--png_compression <0-9>` | `-z<0-9>` | 6 | Select level of compression for output files, from 0 (no compression) to 9 (max compression) |
|
||||
| `--web` | | `False` | Start in web server mode |
|
||||
| `--host <ip addr>` | | `localhost` | Which network interface web server should listen on. Set to 0.0.0.0 to listen on any. |
|
||||
| `--port <port>` | | `9090` | Which port web server should listen for requests on. |
|
||||
@ -101,42 +101,49 @@ overridden on a per-prompt basis (see [List of prompt arguments]
|
||||
| `--free_gpu_mem` | | `False` | Free GPU memory after sampling, to allow image decoding and saving in low VRAM conditions |
|
||||
| `--precision` | | `auto` | Set model precision, default is selected by device. Options: auto, float32, float16, autocast |
|
||||
|
||||
#### deprecated
|
||||
!!! warning deprecated
|
||||
|
||||
These arguments are deprecated but still work:
|
||||
These arguments are deprecated but still work:
|
||||
|
||||
<div align="center" markdown>
|
||||
|
||||
| Argument | Shortcut | Default | Description |
|
||||
|--------------------|------------|---------------------|--------------|
|
||||
| --weights <path> | | None | Pth to weights file; use `--model stable-diffusion-1.4` instead |
|
||||
| --laion400m | -l | False | Use older LAION400m weights; use `--model=laion400m` instead |
|
||||
| Argument | Shortcut | Default | Description |
|
||||
|--------------------|------------|---------------------|--------------|
|
||||
| `--weights <path>` | | `None` | Pth to weights file; use `--model stable-diffusion-1.4` instead |
|
||||
| `--laion400m` | `-l` | `False` | Use older LAION400m weights; use `--model=laion400m` instead |
|
||||
|
||||
**A note on path names:** On Windows systems, you may run into
|
||||
problems when passing the invoke script standard backslashed path
|
||||
names because the Python interpreter treats "\" as an escape.
|
||||
You can either double your slashes (ick): C:\\\\path\\\\to\\\\my\\\\file, or
|
||||
use Linux/Mac style forward slashes (better): C:/path/to/my/file.
|
||||
</div>
|
||||
|
||||
!!! tip
|
||||
|
||||
On Windows systems, you may run into
|
||||
problems when passing the invoke script standard backslashed path
|
||||
names because the Python interpreter treats "\" as an escape.
|
||||
You can either double your slashes (ick): `C:\\path\\to\\my\\file`, or
|
||||
use Linux/Mac style forward slashes (better): `C:/path/to/my/file`.
|
||||
|
||||
## List of prompt arguments
|
||||
|
||||
After the invoke.py script initializes, it will present you with a
|
||||
**invoke>** prompt. Here you can enter information to generate images
|
||||
from text (txt2img), to embellish an existing image or sketch
|
||||
(img2img), or to selectively alter chosen regions of the image
|
||||
(inpainting).
|
||||
`invoke>` prompt. Here you can enter information to generate images
|
||||
from text ([txt2img](#txt2img)), to embellish an existing image or sketch
|
||||
([img2img](#img2img)), or to selectively alter chosen regions of the image
|
||||
([inpainting](#inpainting)).
|
||||
|
||||
### This is an example of txt2img:
|
||||
### txt2img
|
||||
|
||||
~~~~
|
||||
invoke> waterfall and rainbow -W640 -H480
|
||||
~~~~
|
||||
!!! example
|
||||
|
||||
This will create the requested image with the dimensions 640 (width)
|
||||
and 480 (height).
|
||||
```bash
|
||||
invoke> waterfall and rainbow -W640 -H480
|
||||
```
|
||||
|
||||
This will create the requested image with the dimensions 640 (width)
|
||||
and 480 (height).
|
||||
|
||||
Here are the invoke> command that apply to txt2img:
|
||||
|
||||
| Argument | Shortcut | Default | Description |
|
||||
| Argument <img width="680" align="right"/> | Shortcut <img width="420" align="right"/> | Default <img width="480" align="right"/> | Description |
|
||||
|--------------------|------------|---------------------|--------------|
|
||||
| "my prompt" | | | Text prompt to use. The quotation marks are optional. |
|
||||
| --width <int> | -W<int> | 512 | Width of generated image |
|
||||
@ -147,10 +154,12 @@ Here are the invoke> command that apply to txt2img:
|
||||
| --seed <int> | -S<int> | None | Set the random seed for the next series of images. This can be used to recreate an image generated previously.|
|
||||
| --sampler <sampler>| -A<sampler>| k_lms | Sampler to use. Use -h to get list of available samplers. |
|
||||
| --hires_fix | | | Larger images often have duplication artefacts. This option suppresses duplicates by generating the image at low res, and then using img2img to increase the resolution |
|
||||
| --png_compression <0-9> | -z<0-9> | 6 | Select level of compression for output files, from 0 (no compression) to 9 (max compression) |
|
||||
| --grid | -g | False | Turn on grid mode to return a single image combining all the images generated by this prompt |
|
||||
| --individual | -i | True | Turn off grid mode (deprecated; leave off --grid instead) |
|
||||
| --outdir <path> | -o<path> | outputs/img_samples | Temporarily change the location of these images |
|
||||
| --seamless | | False | Activate seamless tiling for interesting effects |
|
||||
| --seamless_axes | | x,y | Specify which axes to use circular convolution on. |
|
||||
| --log_tokenization | -t | False | Display a color-coded list of the parsed tokens derived from the prompt |
|
||||
| --skip_normalization| -x | False | Weighted subprompts will not be normalized. See [Weighted Prompts](./OTHER.md#weighted-prompts) |
|
||||
| --upscale <int> <float> | -U <int> <float> | -U 1 0.75| Upscale image by magnification factor (2, 4), and set strength of upscaling (0.0-1.0). If strength not set, will default to 0.75. |
|
||||
@ -182,69 +191,97 @@ photo and you may run out of memory if it is large.
|
||||
In addition to the command-line options recognized by txt2img, img2img
|
||||
accepts additional options:
|
||||
|
||||
| Argument | Shortcut | Default | Description |
|
||||
|--------------------|------------|---------------------|--------------|
|
||||
| --init_img <path> | -I<path> | None | Path to the initialization image |
|
||||
| --fit | -F | False | Scale the image to fit into the specified -H and -W dimensions |
|
||||
| --strength <float> | -s<float> | 0.75 | How hard to try to match the prompt to the initial image. Ranges from 0.0-0.99, with higher values replacing the initial image completely.|
|
||||
| Argument <img width="160" align="right"/> | Shortcut | Default | Description |
|
||||
|----------------------|-------------|-----------------|--------------|
|
||||
| `--init_img <path>` | `-I<path>` | `None` | Path to the initialization image |
|
||||
| `--fit` | `-F` | `False` | Scale the image to fit into the specified -H and -W dimensions |
|
||||
| `--strength <float>` | `-s<float>` | `0.75` | How hard to try to match the prompt to the initial image. Ranges from 0.0-0.99, with higher values replacing the initial image completely.|
|
||||
|
||||
### This is an example of inpainting:
|
||||
### inpainting
|
||||
|
||||
~~~~
|
||||
invoke> waterfall and rainbow -I./vacation-photo.png -M./vacation-mask.png -W640 -H480 --fit
|
||||
~~~~
|
||||
!!! example
|
||||
|
||||
This will do the same thing as img2img, but image alterations will
|
||||
only occur within transparent areas defined by the mask file specified
|
||||
by -M. You may also supply just a single initial image with the areas
|
||||
to overpaint made transparent, but you must be careful not to destroy
|
||||
the pixels underneath when you create the transparent areas. See
|
||||
[Inpainting](./INPAINTING.md) for details.
|
||||
```bash
|
||||
invoke> waterfall and rainbow -I./vacation-photo.png -M./vacation-mask.png -W640 -H480 --fit
|
||||
```
|
||||
|
||||
This will do the same thing as img2img, but image alterations will
|
||||
only occur within transparent areas defined by the mask file specified
|
||||
by `-M`. You may also supply just a single initial image with the areas
|
||||
to overpaint made transparent, but you must be careful not to destroy
|
||||
the pixels underneath when you create the transparent areas. See
|
||||
[Inpainting](./INPAINTING.md) for details.
|
||||
|
||||
inpainting accepts all the arguments used for txt2img and img2img, as
|
||||
well as the --mask (-M) argument:
|
||||
well as the --mask (-M) and --text_mask (-tm) arguments:
|
||||
|
||||
| Argument | Shortcut | Default | Description |
|
||||
| Argument <img width="100" align="right"/> | Shortcut | Default | Description |
|
||||
|--------------------|------------|---------------------|--------------|
|
||||
| --init_mask <path> | -M<path> | None |Path to an image the same size as the initial_image, with areas for inpainting made transparent.|
|
||||
| `--init_mask <path>` | `-M<path>` | `None` |Path to an image the same size as the initial_image, with areas for inpainting made transparent.|
|
||||
| `--text_mask <prompt> [<float>]` | `-tm <prompt> [<float>]` | <none> | Create a mask from a text prompt describing part of the image|
|
||||
|
||||
`--text_mask` (short form `-tm`) is a way to generate a mask using a
|
||||
text description of the part of the image to replace. For example, if
|
||||
you have an image of a breakfast plate with a bagel, toast and
|
||||
scrambled eggs, you can selectively mask the bagel and replace it with
|
||||
a piece of cake this way:
|
||||
|
||||
# Postprocessing
|
||||
~~~
|
||||
invoke> a piece of cake -I /path/to/breakfast.png -tm bagel
|
||||
~~~
|
||||
|
||||
The algorithm uses <a
|
||||
href="https://github.com/timojl/clipseg">clipseg</a> to classify
|
||||
different regions of the image. The classifier puts out a confidence
|
||||
score for each region it identifies. Generally regions that score
|
||||
above 0.5 are reliable, but if you are getting too much or too little
|
||||
masking you can adjust the threshold down (to get more mask), or up
|
||||
(to get less). In this example, by passing `-tm` a higher value, we
|
||||
are insisting on a more stringent classification.
|
||||
|
||||
~~~
|
||||
invoke> a piece of cake -I /path/to/breakfast.png -tm bagel 0.6
|
||||
~~~
|
||||
|
||||
# Other Commands
|
||||
|
||||
The CLI offers a number of commands that begin with "!".
|
||||
|
||||
## Postprocessing images
|
||||
|
||||
To postprocess a file using face restoration or upscaling, use the
|
||||
`!fix` command.
|
||||
|
||||
## !fix
|
||||
### `!fix`
|
||||
|
||||
This command runs a post-processor on a previously-generated image. It
|
||||
takes a PNG filename or path and applies your choice of the -U, -G, or
|
||||
--embiggen switches in order to fix faces or upscale. If you provide a
|
||||
takes a PNG filename or path and applies your choice of the `-U`, `-G`, or
|
||||
`--embiggen` switches in order to fix faces or upscale. If you provide a
|
||||
filename, the script will look for it in the current output
|
||||
directory. Otherwise you can provide a full or partial path to the
|
||||
desired file.
|
||||
|
||||
Some examples:
|
||||
|
||||
Upscale to 4X its original size and fix faces using codeformer:
|
||||
~~~
|
||||
invoke> !fix 0000045.4829112.png -G1 -U4 -ft codeformer
|
||||
~~~
|
||||
!!! example ""
|
||||
|
||||
Use the GFPGAN algorithm to fix faces, then upscale to 3X using --embiggen:
|
||||
Upscale to 4X its original size and fix faces using codeformer:
|
||||
|
||||
~~~
|
||||
invoke> !fix 0000045.4829112.png -G0.8 -ft gfpgan
|
||||
>> fixing outputs/img-samples/0000045.4829112.png
|
||||
>> retrieved seed 4829112 and prompt "boy enjoying a banana split"
|
||||
>> GFPGAN - Restoring Faces for image seed:4829112
|
||||
Outputs:
|
||||
[1] outputs/img-samples/000017.4829112.gfpgan-00.png: !fix "outputs/img-samples/0000045.4829112.png" -s 50 -S -W 512 -H 512 -C 7.5 -A k_lms -G 0.8
|
||||
```bash
|
||||
invoke> !fix 0000045.4829112.png -G1 -U4 -ft codeformer
|
||||
```
|
||||
|
||||
invoke> !fix 000017.4829112.gfpgan-00.png --embiggen 3
|
||||
...lots of text...
|
||||
Outputs:
|
||||
[2] outputs/img-samples/000018.2273800735.embiggen-00.png: !fix "outputs/img-samples/000017.243781548.gfpgan-00.png" -s 50 -S 2273800735 -W 512 -H 512 -C 7.5 -A k_lms --embiggen 3.0 0.75 0.25
|
||||
~~~
|
||||
!!! example ""
|
||||
|
||||
Use the GFPGAN algorithm to fix faces, then upscale to 3X using --embiggen:
|
||||
|
||||
```bash
|
||||
invoke> !fix 0000045.4829112.png -G0.8 -ft gfpgan
|
||||
>> fixing outputs/img-samples/0000045.4829112.png
|
||||
>> retrieved seed 4829112 and prompt "boy enjoying a banana split"
|
||||
>> GFPGAN - Restoring Faces for image seed:4829112
|
||||
Outputs:
|
||||
[1] outputs/img-samples/000017.4829112.gfpgan-00.png: !fix "outputs/img-samples/0000045.4829112.png" -s 50 -S -W 512 -H 512 -C 7.5 -A k_lms -G 0.8
|
||||
|
||||
# Model selection and importation
|
||||
|
||||
@ -391,13 +428,26 @@ OK to import [n]? y
|
||||
>> Loading waifu-diffusion from models/ldm/stable-diffusion-v1/model-epoch10-float16.ckpt
|
||||
...
|
||||
</pre>
|
||||
|
||||
=======
|
||||
invoke> !fix 000017.4829112.gfpgan-00.png --embiggen 3
|
||||
...lots of text...
|
||||
Outputs:
|
||||
[2] outputs/img-samples/000018.2273800735.embiggen-00.png: !fix "outputs/img-samples/000017.243781548.gfpgan-00.png" -s 50 -S 2273800735 -W 512 -H 512 -C 7.5 -A k_lms --embiggen 3.0 0.75 0.25
|
||||
```
|
||||
# History processing
|
||||
|
||||
The CLI provides a series of convenient commands for reviewing previous
|
||||
actions, retrieving them, modifying them, and re-running them.
|
||||
```bash
|
||||
invoke> !fetch 0000015.8929913.png
|
||||
# the script returns the next line, ready for editing and running:
|
||||
invoke> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5
|
||||
```
|
||||
|
||||
## !history
|
||||
Note that this command may behave unexpectedly if given a PNG file that
|
||||
was not generated by InvokeAI.
|
||||
|
||||
### `!history`
|
||||
|
||||
The invoke script keeps track of all the commands you issue during a
|
||||
session, allowing you to re-run them. On Mac and Linux systems, it
|
||||
@ -406,10 +456,10 @@ the most recent 1000 commands issued.
|
||||
|
||||
The `!history` command will return a numbered list of all the commands
|
||||
issued during the session (Windows), or the most recent 1000 commands
|
||||
(Mac|Linux). You can then repeat a command by using the command !NNN,
|
||||
(Mac|Linux). You can then repeat a command by using the command `!NNN`,
|
||||
where "NNN" is the history line number. For example:
|
||||
|
||||
~~~
|
||||
```bash
|
||||
invoke> !history
|
||||
...
|
||||
[14] happy woman sitting under tree wearing broad hat and flowing garment
|
||||
@ -420,7 +470,7 @@ invoke> !history
|
||||
...
|
||||
invoke> !20
|
||||
invoke> watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
|
||||
~~~
|
||||
```
|
||||
|
||||
## !fetch
|
||||
|
||||
@ -438,56 +488,66 @@ invoke> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5
|
||||
Note that this command may behave unexpectedly if given a PNG file that
|
||||
was not generated by InvokeAI.
|
||||
|
||||
## !search <search string>
|
||||
### !search <search string>
|
||||
|
||||
This is similar to !history but it only returns lines that contain
|
||||
`search string`. For example:
|
||||
|
||||
~~~
|
||||
```bash
|
||||
invoke> !search surreal
|
||||
[21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
|
||||
~~~
|
||||
```
|
||||
|
||||
## !clear
|
||||
### `!clear`
|
||||
|
||||
This clears the search history from memory and disk. Be advised that
|
||||
this operation is irreversible and does not issue any warnings!
|
||||
|
||||
# Command-line editing and completion
|
||||
Other ! Commands
|
||||
|
||||
### !mask
|
||||
|
||||
This command takes an image, a text prompt, and uses the `clipseg`
|
||||
algorithm to automatically generate a mask of the area that matches
|
||||
the text prompt. It is useful for debugging the text masking process
|
||||
prior to inpainting with the `--text_mask` argument. See
|
||||
[INPAINTING.md] for details.
|
||||
|
||||
## Command-line editing and completion
|
||||
|
||||
The command-line offers convenient history tracking, editing, and
|
||||
command completion.
|
||||
|
||||
- To scroll through previous commands and potentially edit/reuse them, use the up and down cursor keys.
|
||||
- To edit the current command, use the left and right cursor keys to position the cursor, and then backspace, delete or insert characters.
|
||||
- To move to the very beginning of the command, type CTRL-A (or command-A on the Mac)
|
||||
- To move to the end of the command, type CTRL-E.
|
||||
- To cut a section of the command, position the cursor where you want to start cutting and type CTRL-K.
|
||||
- To paste a cut section back in, position the cursor where you want to paste, and type CTRL-Y
|
||||
- To scroll through previous commands and potentially edit/reuse them, use the ++up++ and ++down++ keys.
|
||||
- To edit the current command, use the ++left++ and ++right++ keys to position the cursor, and then ++backspace++, ++delete++ or insert characters.
|
||||
- To move to the very beginning of the command, type ++ctrl+a++ (or ++command+a++ on the Mac)
|
||||
- To move to the end of the command, type ++ctrl+e++.
|
||||
- To cut a section of the command, position the cursor where you want to start cutting and type ++ctrl+k++
|
||||
- To paste a cut section back in, position the cursor where you want to paste, and type ++ctrl+y++
|
||||
|
||||
Windows users can get similar, but more limited, functionality if they
|
||||
launch invoke.py with the "winpty" program and have the `pyreadline3`
|
||||
launch `invoke.py` with the `winpty` program and have the `pyreadline3`
|
||||
library installed:
|
||||
|
||||
~~~
|
||||
```batch
|
||||
> winpty python scripts\invoke.py
|
||||
~~~
|
||||
```
|
||||
|
||||
On the Mac and Linux platforms, when you exit invoke.py, the last 1000
|
||||
lines of your command-line history will be saved. When you restart
|
||||
invoke.py, you can access the saved history using the up-arrow key.
|
||||
`invoke.py`, you can access the saved history using the ++up++ key.
|
||||
|
||||
In addition, limited command-line completion is installed. In various
|
||||
contexts, you can start typing your command and press tab. A list of
|
||||
contexts, you can start typing your command and press ++tab++. A list of
|
||||
potential completions will be presented to you. You can then type a
|
||||
little more, hit tab again, and eventually autocomplete what you want.
|
||||
little more, hit ++tab++ again, and eventually autocomplete what you want.
|
||||
|
||||
When specifying file paths using the one-letter shortcuts, the CLI
|
||||
will attempt to complete pathnames for you. This is most handy for the
|
||||
-I (init image) and -M (init mask) paths. To initiate completion, start
|
||||
the path with a slash ("/") or "./". For example:
|
||||
`-I` (init image) and `-M` (init mask) paths. To initiate completion, start
|
||||
the path with a slash (`/`) or `./`. For example:
|
||||
|
||||
~~~
|
||||
```bash
|
||||
invoke> zebra with a mustache -I./test-pictures<TAB>
|
||||
-I./test-pictures/Lincoln-and-Parrot.png -I./test-pictures/zebra.jpg -I./test-pictures/madonna.png
|
||||
-I./test-pictures/bad-sketch.png -I./test-pictures/man_with_eagle/
|
||||
|
@ -43,7 +43,7 @@ it's similar to that, except it can work up to an arbitrarily large size
|
||||
has extra logic to re-run any number of the tile sub-sections of the image
|
||||
if for example a small part of a huge run got messed up.
|
||||
|
||||
## Usage
|
||||
### Usage
|
||||
|
||||
`-embiggen <scaling_factor> <esrgan_strength> <overlap_ratio OR overlap_pixels>`
|
||||
|
||||
@ -100,26 +100,30 @@ Tiles are numbered starting with one, and left-to-right,
|
||||
top-to-bottom. So, if you are generating a 3x3 tiled image, the
|
||||
middle row would be `4 5 6`.
|
||||
|
||||
## Example Usage
|
||||
### Examples
|
||||
|
||||
Running Embiggen with 512x512 tiles on an existing image, scaling up by a factor of 2.5x;
|
||||
and doing the same again (default ESRGAN strength is 0.75, default overlap between tiles is 0.25):
|
||||
!!! example ""
|
||||
|
||||
```bash
|
||||
invoke > a photo of a forest at sunset -s 100 -W 512 -H 512 -I outputs/forest.png -f 0.4 -embiggen 2.5
|
||||
invoke > a photo of a forest at sunset -s 100 -W 512 -H 512 -I outputs/forest.png -f 0.4 -embiggen 2.5 0.75 0.25
|
||||
```
|
||||
Running Embiggen with 512x512 tiles on an existing image, scaling up by a factor of 2.5x;
|
||||
and doing the same again (default ESRGAN strength is 0.75, default overlap between tiles is 0.25):
|
||||
|
||||
If your starting image was also 512x512 this should have taken 9 tiles.
|
||||
```bash
|
||||
invoke > a photo of a forest at sunset -s 100 -W 512 -H 512 -I outputs/forest.png -f 0.4 -embiggen 2.5
|
||||
invoke > a photo of a forest at sunset -s 100 -W 512 -H 512 -I outputs/forest.png -f 0.4 -embiggen 2.5 0.75 0.25
|
||||
```
|
||||
|
||||
If there weren't enough clouds in the sky of that forest you just made
|
||||
(and that image is about 1280 pixels (512*2.5) wide A.K.A. three
|
||||
512x512 tiles with 0.25 overlaps wide) we can replace that top row of
|
||||
tiles:
|
||||
If your starting image was also 512x512 this should have taken 9 tiles.
|
||||
|
||||
```bash
|
||||
invoke> a photo of puffy clouds over a forest at sunset -s 100 -W 512 -H 512 -I outputs/000002.seed.png -f 0.5 -embiggen_tiles 1 2 3
|
||||
```
|
||||
!!! example ""
|
||||
|
||||
If there weren't enough clouds in the sky of that forest you just made
|
||||
(and that image is about 1280 pixels (512*2.5) wide A.K.A. three
|
||||
512x512 tiles with 0.25 overlaps wide) we can replace that top row of
|
||||
tiles:
|
||||
|
||||
```bash
|
||||
invoke> a photo of puffy clouds over a forest at sunset -s 100 -W 512 -H 512 -I outputs/000002.seed.png -f 0.5 -embiggen_tiles 1 2 3
|
||||
```
|
||||
|
||||
## Fixing Previously-Generated Images
|
||||
|
||||
@ -128,27 +132,27 @@ look up the original prompt and provide an initial image. Just use the
|
||||
syntax `!fix path/to/file.png <embiggen>`. For example, you can rewrite the
|
||||
previous command to look like this:
|
||||
|
||||
~~~~
|
||||
```bash
|
||||
invoke> !fix ./outputs/000002.seed.png -embiggen_tiles 1 2 3
|
||||
~~~~
|
||||
```
|
||||
|
||||
A new file named `000002.seed.fixed.png` will be created in the output directory. Note that
|
||||
the `!fix` command does not replace the original file, unlike the behavior at generate time.
|
||||
You do not need to provide the prompt, and `!fix` automatically selects a good strength for
|
||||
embiggen-ing.
|
||||
|
||||
!!! note
|
||||
|
||||
**Note**
|
||||
Because the same prompt is used on all the tiled images, and the model
|
||||
doesn't have the context of anything outside the tile being run - it
|
||||
can end up creating repeated pattern (also called 'motifs') across all
|
||||
the tiles based on that prompt. The best way to combat this is
|
||||
lowering the `--strength` (`-f`) to stay more true to the init image,
|
||||
and increasing the number of steps so there is more compute-time to
|
||||
create the detail. Anecdotally `--strength` 0.35-0.45 works pretty
|
||||
well on most things. It may also work great in some examples even with
|
||||
the `--strength` set high for patterns, landscapes, or subjects that
|
||||
are more abstract. Because this is (relatively) fast, you can also
|
||||
preserve the best parts from each.
|
||||
Because the same prompt is used on all the tiled images, and the model
|
||||
doesn't have the context of anything outside the tile being run - it
|
||||
can end up creating repeated pattern (also called 'motifs') across all
|
||||
the tiles based on that prompt. The best way to combat this is
|
||||
lowering the `--strength` (`-f`) to stay more true to the init image,
|
||||
and increasing the number of steps so there is more compute-time to
|
||||
create the detail. Anecdotally `--strength` 0.35-0.45 works pretty
|
||||
well on most things. It may also work great in some examples even with
|
||||
the `--strength` set high for patterns, landscapes, or subjects that
|
||||
are more abstract. Because this is (relatively) fast, you can also
|
||||
preserve the best parts from each.
|
||||
|
||||
Author: [Travco](https://github.com/travco)
|
||||
|
@ -2,7 +2,9 @@
|
||||
title: Image-to-Image
|
||||
---
|
||||
|
||||
# :material-image-multiple: **IMG2IMG**
|
||||
# :material-image-multiple: Image-to-Image
|
||||
|
||||
## `img2img`
|
||||
|
||||
This script also provides an `img2img` feature that lets you seed your creations with an initial
|
||||
drawing or photo. This is a really cool feature that tells stable diffusion to build the prompt on
|
||||
@ -15,13 +17,17 @@ tree on a hill with a river, nature photograph, national geographic -I./test-pic
|
||||
|
||||
This will take the original image shown here:
|
||||
|
||||
<div align="center" markdown>
|
||||
<img src="https://user-images.githubusercontent.com/50542132/193946000-c42a96d8-5a74-4f8a-b4c3-5213e6cadcce.png" width=350>
|
||||
|
||||
</div>
|
||||
|
||||
and generate a new image based on it as shown here:
|
||||
|
||||
<div align="center" markdown>
|
||||
<img src="https://user-images.githubusercontent.com/111189/194135515-53d4c060-e994-4016-8121-7c685e281ac9.png" width=350>
|
||||
</div>
|
||||
|
||||
The `--init_img (-I)` option gives the path to the seed picture. `--strength (-f)` controls how much
|
||||
The `--init_img` (`-I`) option gives the path to the seed picture. `--strength` (`-f`) controls how much
|
||||
the original will be modified, ranging from `0.0` (keep the original intact), to `1.0` (ignore the
|
||||
original completely). The default is `0.75`, and ranges from `0.25-0.90` give interesting results.
|
||||
Other relevant options include `-C` (classification free guidance scale), and `-s` (steps). Unlike `txt2img`,
|
||||
@ -37,21 +43,22 @@ a very different image:
|
||||
|
||||
`photograph of a tree on a hill with a river`
|
||||
|
||||
<div align="center" markdown>
|
||||
<img src="https://user-images.githubusercontent.com/111189/194135220-16b62181-b60c-4248-8989-4834a8fd7fbd.png" width=350>
|
||||
</div>
|
||||
|
||||
(When designing prompts, think about how the images scraped from the internet were captioned. Very few photographs will
|
||||
be labeled "photograph" or "photorealistic." They will, however, be captioned with the publication, photographer, camera
|
||||
model, or film settings.)
|
||||
!!! tip
|
||||
|
||||
When designing prompts, think about how the images scraped from the internet were captioned. Very few photographs will
|
||||
be labeled "photograph" or "photorealistic." They will, however, be captioned with the publication, photographer, camera
|
||||
model, or film settings.
|
||||
|
||||
If the initial image contains transparent regions, then Stable Diffusion will only draw within the
|
||||
transparent regions, a process called "inpainting". However, for this to work correctly, the color
|
||||
transparent regions, a process called [`inpainting`](./INPAINTING.md#creating-transparent-regions-for-inpainting). However, for this to work correctly, the color
|
||||
information underneath the transparent needs to be preserved, not erased.
|
||||
|
||||
More details can be found here:
|
||||
[Creating Transparent Images For Inpainting](./INPAINTING.md#creating-transparent-regions-for-inpainting)
|
||||
!!! warning
|
||||
|
||||
<<<<<<< HEAD
|
||||
=======
|
||||
**IMPORTANT ISSUE** `img2img` does not work properly on initial images smaller than 512x512. Please scale your
|
||||
image to at least 512x512 before using it. Larger images are not a problem, but may run out of VRAM on your
|
||||
GPU card. To fix this, use the --fit option, which downscales the initial image to fit within the box specified
|
||||
@ -60,74 +67,67 @@ by width x height:
|
||||
tree on a hill with a river, national geographic -I./test-pictures/big-sketch.png -H512 -W512 --fit
|
||||
~~~
|
||||
|
||||
>>>>>>> main
|
||||
## How does it actually work, though?
|
||||
|
||||
The main difference between `img2img` and `prompt2img` is the starting point. While `prompt2img` always starts with pure
|
||||
gaussian noise and progressively refines it over the requested number of steps, `img2img` skips some of these earlier steps
|
||||
(how many it skips is indirectly controlled by the `--strength` parameter), and uses instead your initial image mixed with gaussian noise as the starting image.
|
||||
The main difference between `img2img` and `prompt2img` is the starting point. While `prompt2img` always starts with pure
|
||||
gaussian noise and progressively refines it over the requested number of steps, `img2img` skips some of these earlier steps
|
||||
(how many it skips is indirectly controlled by the `--strength` parameter), and uses instead your initial image mixed with gaussian noise as the starting image.
|
||||
|
||||
**Let's start** by thinking about vanilla `prompt2img`, just generating an image from a prompt. If the step count is 10, then the "latent space" (Stable Diffusion's internal representation of the image) for the prompt "fire" with seed `1592514025` develops something like this:
|
||||
|
||||
```commandline
|
||||
<<<<<<< HEAD
|
||||
dream> "fire" -s10 -W384 -H384 -S1592514025
|
||||
=======
|
||||
invoke> "fire" -s10 -W384 -H384 -S1592514025
|
||||
>>>>>>> main
|
||||
```
|
||||
|
||||
<div align="center" markdown>
|
||||
![latent steps](../assets/img2img/000019.steps.png)
|
||||
</div>
|
||||
|
||||

Put simply: starting from a frame of fuzz/static, SD finds details in each frame that it thinks look like "fire" and brings them a little bit more into focus, gradually scrubbing out the fuzz until a clear image remains.

**When you use `img2img`** some of the earlier steps are cut, and instead an initial image of your choice is used. But because of how the maths behind Stable Diffusion works, this image needs to be mixed with just the right amount of noise (fuzz/static) for where it is being inserted. This is where the strength parameter comes in. Depending on the set strength, your image will be inserted into the sequence at the appropriate point, with just the right amount of noise.

### A concrete example

I want SD to draw a fire based on this hand-drawn image:

<div align="center" markdown>
![drawing of a fireplace](../assets/img2img/fire-drawing.png)
</div>

Let's only do 10 steps, to make it easier to see what's happening. If strength is `0.7`, this is what the internal steps the algorithm has to take will look like:

<div align="center" markdown>
![gravity32](../assets/img2img/000032.steps.gravity.png)
</div>

With strength `0.4`, the steps look more like this:

<div align="center" markdown>
![gravity30](../assets/img2img/000030.steps.gravity.png)
</div>

Notice how much more fuzzy the starting image is for strength `0.7` compared to `0.4`, and notice also how much longer the sequence is with `0.7`:

| | strength = 0.7 | strength = 0.4 |
| -- | -- | -- |
| initial image that SD sees | ![](../assets/img2img/000032.step-0.png) | ![](../assets/img2img/000030.step-0.png) |
| steps argument to `invoke>` | `-s10` | `-s10` |
| steps actually taken | 7 | 4 |
| latent space at each step | ![gravity32](../assets/img2img/000032.steps.gravity.png) | ![gravity30](../assets/img2img/000030.steps.gravity.png) |
| output | ![000032.1592514025](../assets/img2img/000032.1592514025.png) | ![000030.1592514025](../assets/img2img/000030.1592514025.png) |
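
For a rough mental model of where the "steps actually taken" numbers come from, here is a small hedged sketch (plain Python arithmetic, not InvokeAI's actual code) relating strength, the requested `-s` value, and the steps that really run:

```python
# Illustrative arithmetic only -- not InvokeAI's implementation.
def steps_actually_taken(requested_steps: int, strength: float) -> int:
    """Approximate number of denoising steps img2img really runs."""
    return round(requested_steps * strength)

def steps_to_request(desired_steps: int, strength: float) -> int:
    """Approximate -s value needed so that roughly `desired_steps` run."""
    return round(desired_steps / strength)

print(steps_actually_taken(10, 0.7))  # ~7, as in the table above
print(steps_actually_taken(10, 0.4))  # ~4
print(steps_to_request(20, 0.4))      # ~50, used in the next section
```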

Both of the outputs look kind of like what I was thinking of. With the strength higher, my input becomes more vague, *and* Stable Diffusion has more steps to refine its output. But it's not really making what I want, which is a picture of a cheery open fire. With the strength lower, my input is more clear, *but* Stable Diffusion has less chance to refine itself, so the result ends up inheriting all the problems of my bad drawing.

If you want to try this out yourself, all of these are using a seed of `1592514025` with a width/height of `384`, step count `10`, the default sampler (`k_lms`), and the single-word prompt `fire`:

```commandline
invoke> "fire" -s10 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png --strength 0.7
```

The code for rendering intermediates is on my (damian0815's) branch [document-img2img](https://github.com/damian0815/InvokeAI/tree/document-img2img) - run `invoke.py` and check your `outputs/img-samples/intermediates` folder while generating an image.

### Compensating for the reduced step count

@ -136,44 +136,42 @@ After putting this guide together I was curious to see how the difference would

Here's strength `0.4` (note step count `50`, which is `20 ÷ 0.4` to make sure SD does `20` steps from my image):

```commandline
invoke> "fire" -s50 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.4
```

<div align="center" markdown>
![000035.1592514025](../assets/img2img/000035.1592514025.png)
</div>

and here is strength `0.7` (note step count `30`, which is roughly `20 ÷ 0.7` to make sure SD does `20` steps from my image):

```commandline
invoke> "fire" -s30 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.7
```

<div align="center" markdown>
![000046.1592514025](../assets/img2img/000046.1592514025.png)
</div>

In both cases the image is nice and clean and "finished", but because at strength `0.7` Stable Diffusion has been given so much more freedom to improve on my badly-drawn flames, they've come out looking much better. You can really see the difference when looking at the latent steps. There's more noise on the first image with strength `0.7`:

![gravity46](../assets/img2img/000046.steps.gravity.png)

than there is for strength `0.4`:

![gravity35](../assets/img2img/000035.steps.gravity.png)

and that extra noise gives the algorithm more choices when it is evaluating how to denoise any particular pixel in the image.

Unfortunately, it seems that `img2img` is very sensitive to the step count. Here's strength `0.7` with a step count of `29` (SD did 19 steps from my image):

<div align="center" markdown>
![gravity45](../assets/img2img/000045.1592514025.png)
</div>

By comparing the latents we can sort of see that something got interpreted differently enough on the third or fourth step to lead to a rather different interpretation of the flames.

![gravity46](../assets/img2img/000046.steps.gravity.png)
![gravity45](../assets/img2img/000045.steps.gravity.png)

This is the result of a difference in the de-noising "schedule" - basically the noise has to be cleaned by a certain degree each step or the model won't "converge" on the image properly (see [stable diffusion blog](https://huggingface.co/blog/stable_diffusion) for more about that). A different step count means a different schedule, which means things get interpreted slightly differently at every step.
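
To make the schedule point concrete, here is a hedged sketch of a toy noise schedule (plain Python with NumPy; the log spacing and sigma range are illustrative values, not the exact schedule `k_lms` uses). Even toy schedules for 29 and 30 steps never visit quite the same noise levels:

```python
# Illustrative only: a toy log-spaced noise schedule, not the real k_lms one.
import numpy as np

def toy_schedule(steps: int, sigma_max: float = 14.6, sigma_min: float = 0.03) -> np.ndarray:
    """Noise levels the sampler passes through, from highest to lowest."""
    return np.exp(np.linspace(np.log(sigma_max), np.log(sigma_min), steps))

s29, s30 = toy_schedule(29), toy_schedule(30)
# The endpoints agree, but every intermediate level differs slightly,
# so the denoising trajectory -- and the final image -- can drift apart.
print(s29[:4].round(2), s30[:4].round(2))
```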

@ -6,27 +6,148 @@ title: Inpainting

## **Creating Transparent Regions for Inpainting**

Inpainting is really cool. To do it, you start with an initial image
and use a photoeditor to make one or more regions transparent
(i.e. they have a "hole" in them). You then provide the path to this
image at the invoke> command line using the `-I` switch. Stable
Diffusion will only paint within the transparent region.

There's a catch. In the current implementation, you have to prepare
the initial image correctly so that the underlying colors are
preserved under the transparent area. Many image editing
applications will by default erase the color information under the
transparent pixels and replace them with white or black, which will
lead to suboptimal inpainting. It often helps to apply incomplete
transparency, such as any value between 1 and 99%.

You also must take care to export the PNG file in such a way that the
color information is preserved. There is often an option in the export
dialog that lets you specify this.

If your photoeditor is erasing the underlying color information,
`invoke.py` will give you a big fat warning. If you can't find a way to
coax your photoeditor to retain color values under transparent areas,
then you can combine the `-I` and `-M` switches to provide both the
original unedited image and the masked (partially transparent) image:

```bash
invoke> "man with cat on shoulder" -I./images/man.png -M./images/man-transparent.png
```

We are hoping to get rid of the need for this workaround in an upcoming release.
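
If you prefer to prepare the transparent region programmatically, here is a minimal hedged sketch using Pillow (an illustration, not part of InvokeAI; the file names and coordinates are placeholders). It zeroes the alpha channel over a rectangle while leaving the RGB values underneath untouched, which is exactly the property the inpainting code needs:

```python
# Hedged example: punch a transparent hole in a PNG while preserving its RGB data.
from PIL import Image

img = Image.open("man.png").convert("RGBA")   # placeholder input file
alpha = img.getchannel("A").copy()

# Zero the alpha over the region to be inpainted; RGB underneath is untouched.
hole = Image.new("L", (128, 128), 0)
alpha.paste(hole, (192, 192))                 # illustrative coordinates

img.putalpha(alpha)
img.save("man-transparent.png")               # colors survive under the hole
```

An image viewer will show a transparent hole, but the original colors are still stored in the file, so the warning described above should not appear.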

## **Masking using Text**

You can also create a mask using a text prompt to select the part of
the image you want to alter, using the <a
href="https://github.com/timojl/clipseg">clipseg</a> algorithm. This
works on any image, not just ones generated by InvokeAI.

The `--text_mask` (short form `-tm`) option takes two arguments. The
first argument is a text description of the part of the image you wish
to mask (paint over). If the text description contains a space, you must
surround it with quotation marks. The optional second argument is the
minimum threshold for the mask classifier's confidence score, described
in more detail below.

To see how this works in practice, here's an image of a still life
painting that I got off the web.

<img src="../assets/still-life-scaled.jpg">

You can selectively mask out the
orange and replace it with a baseball in this way:

~~~
invoke> a baseball -I /path/to/still_life.png -tm orange
~~~

<img src="../assets/still-life-inpainted.png">

The clipseg classifier produces a confidence score for each region it
identifies. Generally regions that score above 0.5 are reliable, but
if you are getting too much or too little masking you can adjust the
threshold down (to get more mask), or up (to get less). In this
example, by passing `-tm` a higher value, we are insisting on a tighter
mask. However, if you make it too high, the orange may not be picked
up at all!

~~~
invoke> a baseball -I /path/to/breakfast.png -tm orange 0.6
~~~

The `!mask` command may be useful for debugging problems with the
text2mask feature. The syntax is `!mask /path/to/image.png -tm <text>
<threshold>`

It will generate three files:

- The image with the selected area highlighted.
- The image with the un-selected area highlighted.
- The image with the selected area converted into a black and white
  image according to the threshold level.

Note that none of these images are intended to be used as the mask
passed to invoke via `-M` and may give unexpected results if you try
to use them this way. Instead, use `!mask` for testing that you are
selecting the right mask area, and then do inpainting using the
best selection term and threshold.

Here is an example of how `!mask` works:

```
invoke> !mask ./test-pictures/curly.png -tm hair 0.5
>> generating masks from ./test-pictures/curly.png
>> Initializing clipseg model for text to mask inference
Outputs:
[941.1] outputs/img-samples/000019.curly.hair.deselected.png: !mask ./test-pictures/curly.png -tm hair 0.5
[941.2] outputs/img-samples/000019.curly.hair.selected.png: !mask ./test-pictures/curly.png -tm hair 0.5
[941.3] outputs/img-samples/000019.curly.hair.masked.png: !mask ./test-pictures/curly.png -tm hair 0.5
```

**Original image "curly.png"**
<img src="../assets/outpainting/curly.png">

**000019.curly.hair.selected.png**
<img src="../assets/inpainting/000019.curly.hair.selected.png">

**000019.curly.hair.deselected.png**
<img src="../assets/inpainting/000019.curly.hair.deselected.png">

**000019.curly.hair.masked.png**
<img src="../assets/inpainting/000019.curly.hair.masked.png">

It looks like we selected the hair pretty well at the 0.5 threshold
(which is the default, so we didn't actually have to specify it), so
let's have some fun:

```
invoke> medusa with cobras -I ./test-pictures/curly.png -tm hair 0.5 -C20
>> loaded input image of size 512x512 from ./test-pictures/curly.png
...
Outputs:
[946] outputs/img-samples/000024.801380492.png: "medusa with cobras" -s 50 -S 801380492 -W 512 -H 512 -C 20.0 -I ./test-pictures/curly.png -A k_lms -f 0.75
```

<img src="../assets/inpainting/000024.801380492.png">

### Inpainting is not changing the masked region enough!

One of the things to understand about how inpainting works is that it
is equivalent to running img2img on just the masked (transparent)
area. img2img builds on top of the existing image data, and therefore
will attempt to preserve colors, shapes and textures to the best of
its ability. Unfortunately this means that if you want to make a
dramatic change in the inpainted region, for example replacing a red
wall with a blue one, the algorithm will fight you.

You have a couple of options. The first is to increase the values of
the requested steps (`-sXXX`), strength (`-f0.XX`), and/or
classifier-free guidance (`-CXX.X`). If this is not working for you, a
more extreme step is to provide the `--inpaint_replace 0.X` (`-r0.X`)
option. This value ranges from 0.0 to 1.0. The higher it is the less
attention the algorithm will pay to the data underneath the masked
region. At high values this will enable you to replace colored regions
entirely, but beware that the masked region may not blend in with the
surrounding unmasked regions as well.
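
One way to picture what `--inpaint_replace` does, based on the description above (a hedged sketch, not the actual implementation): it blends the masked region's starting latents toward fresh random noise, so at `-r1.0` the underlying image data is ignored entirely.

```python
# Hedged illustration of the inpaint_replace idea; not InvokeAI's exact code.
import torch

def apply_inpaint_replace(init_latents: torch.Tensor, mask: torch.Tensor, replace: float) -> torch.Tensor:
    """mask == 1 inside the region to be inpainted; replace ranges 0.0-1.0."""
    if replace == 0:
        return init_latents
    noise = torch.randn_like(init_latents)
    blended = (1 - replace) * init_latents + replace * noise
    return torch.where(mask.bool(), blended, init_latents)
```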

---

@ -36,7 +157,7 @@ We are hoping to get rid of the need for this workaround in an upcoming release.

1. Open image in GIMP.
2. Layer->Transparency->Add Alpha Channel
3. Use lasso tool to select region to mask
4. Choose Select -> Float to create a floating selection
5. Open the Layers toolbar (^L) and select "Floating Selection"
6. Set opacity to a value between 0% and 99%

@ -44,33 +165,34 @@ We are hoping to get rid of the need for this workaround in an upcoming release.

8. In the export dialogue, make sure the "Save colour values from
   transparent pixels" checkbox is selected.

---

## Recipe for Adobe Photoshop

1. Open image in Photoshop

    <div align="center" markdown>![step1](../assets/step1.png)</div>

2. Use any of the selection tools (Marquee, Lasso, or Wand) to select the area you desire to inpaint.

    <div align="center" markdown>![step2](../assets/step2.png)</div>

3. Because we'll be applying a mask over the area we want to preserve, you should now select the inverse by using the ++shift+ctrl+i++ shortcut, or right clicking and using the "Select Inverse" option.

4. You'll now create a mask by selecting the image layer, and Masking the selection. Make sure that you don't delete any of the underlying image, or your inpainting results will be dramatically impacted.

    <div align="center" markdown>![step4](../assets/step4.png)</div>

5. Make sure to hide any background layers that are present. You should see the mask applied to your image layer, and the image on your canvas should display the checkered background.

    <div align="center" markdown>![step5](../assets/step5.png)</div>

6. Save the image as a transparent PNG by using `File`-->`Save a Copy` from the menu bar, or by using the keyboard shortcut ++alt+ctrl+s++

    <div align="center" markdown>![step6](../assets/step6.png)</div>

7. After following the inpainting instructions above (either through the CLI or the Web UI), marvel at your newfound ability to selectively invoke. Lookin' good!

    <div align="center" markdown>![step7](../assets/step7.png)</div>

8. In the export dialogue, make sure the "Save colour values from transparent pixels" checkbox is selected.

@ -6,15 +6,13 @@ title: Others

## **Google Colab**

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg){ align="right" }](https://colab.research.google.com/github/lstein/stable-diffusion/blob/main/notebooks/Stable_Diffusion_AI_Notebook.ipynb)

Open and follow instructions to use an isolated environment running Dream.

Output Example:

![Colab Notebook](../assets/colab_notebook.png)

---

@ -28,17 +26,23 @@ for each `invoke>` prompt as shown here:

```bash
invoke> "pond garden with lotus by claude monet" --seamless -s100 -n4
```

By default this will tile on both the X and Y axes. However, you can also specify specific axes to tile on with `--seamless_axes`.
Possible values are `x`, `y`, and `x,y`:

```bash
invoke> "pond garden with lotus by claude monet" --seamless --seamless_axes=x -s100 -n4
```
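
Under the hood, seamless tiling is commonly implemented by changing how the model's convolutions pad at the image borders, so that the left edge "sees" the right edge and vice versa. A hedged PyTorch sketch of the idea (illustrative, not InvokeAI's exact code):

```python
# Illustrative only: switch every Conv2d to circular (wrap-around) padding
# so that generated textures tile without visible seams.
import torch.nn as nn

def make_seamless(model: nn.Module) -> None:
    for layer in model.modules():
        if isinstance(layer, nn.Conv2d):
            layer.padding_mode = "circular"

# Tiling along only one axis (what --seamless_axes selects) would require
# wrapping the padding along that axis alone, which needs a custom padding step.
```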

---

## **Shortcuts: Reusing Seeds**

Since it is so common to reuse seeds while refining a prompt, there is now a shortcut as of version
1.11. Provide a `-S` (or `--seed`) switch of `-1` to use the seed of the most recent image
generated. If you produced multiple images with the `-n` switch, then you can go back further
using `-2`, `-3`, etc. up to the first image generated by the previous command. Sorry, but you can't go
back further than one command.

Here's an example of using this to do a quick refinement. It also illustrates using the new `-G`
switch to turn on upscaling and face enhancement (see previous section):

```bash

@ -58,7 +62,7 @@ outputs/img-samples/000040.3498014304.png: "a cute child playing hopscotch" -G1.

## **Weighted Prompts**

You may weight different sections of the prompt to tell the sampler to attach different levels of
priority to them, by adding `:<number>` to the end of the section you wish to up- or downweight. For
example consider this prompt:

```bash

@ -71,24 +75,30 @@ combination of integers and floating point numbers, and they do not need to add
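
For intuition, here is a hedged sketch of how such weighted sections might be parsed and normalized before being handed to the sampler (illustrative Python, not InvokeAI's actual parser; the function name is made up):

```python
# Hedged illustration: split a prompt such as "blue sphere:0.25 red cube:0.75"
# into (text, weight) pairs and normalize the weights so they sum to 1 --
# which is why they need not add up to 1 in the prompt itself.
import re

def parse_weighted_subprompts(prompt: str) -> list[tuple[str, float]]:
    pairs = re.findall(r"(.+?):([\d.]+)\s*", prompt)
    if not pairs:                      # no weights given: single subprompt
        return [(prompt, 1.0)]
    total = sum(float(w) for _, w in pairs)
    return [(text.strip(), float(w) / total) for text, w in pairs]

print(parse_weighted_subprompts("blue sphere:0.25 red cube:0.75"))
# [('blue sphere', 0.25), ('red cube', 0.75)]
```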

---

## **Thresholding and Perlin Noise Initialization Options**

Two new options are the thresholding (`--threshold`) and the perlin noise initialization (`--perlin`) options. Thresholding limits the range of the latent values during optimization, which helps combat oversaturation with higher CFG scale values. Perlin noise initialization starts with a percentage (a value ranging from 0 to 1) of perlin noise mixed into the initial noise. Both features allow for more variations and options in the course of generating images.

For better intuition into what these options do in practice:

![here is a graphic demonstrating them both](../assets/truncation_comparison.jpg)

In generating this graphic, perlin noise at initialization was programmatically varied going across on the diagram by values 0.0, 0.1, 0.2, 0.4, 0.5, 0.6, 0.8, 0.9, 1.0; and the threshold was varied going down from
0, 1, 2, 3, 4, 5, 10, 20, 100. The other options are fixed, so the initial prompt is as follows (no thresholding or perlin noise):

```bash
invoke> "a portrait of a beautiful young lady" -S 1950357039 -s 100 -C 20 -A k_euler_a --threshold 0 --perlin 0
```

Here's an example of another prompt used when setting the threshold to 5 and perlin noise to 0.2:

```bash
invoke> "a portrait of a beautiful young lady" -S 1950357039 -s 100 -C 20 -A k_euler_a --threshold 5 --perlin 0.2
```

!!! note

    Currently the thresholding feature is only implemented for the k-diffusion style samplers, and empirically appears to work best with `k_euler_a` and `k_dpm_2_a`. Using 0 disables thresholding. Using 0 for perlin noise disables using perlin noise for initialization. Finally, using 1 for perlin noise uses only perlin noise for initialization.
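
A hedged sketch of what the two options do to the starting point, based only on the description above (illustrative PyTorch, not InvokeAI's exact code; `perlin_noise_like` is a placeholder callable):

```python
# Illustrative only. --threshold T clamps latent values into [-T, T];
# --perlin P blends a fraction P of perlin noise into the initial noise.
import torch

def apply_threshold(latents: torch.Tensor, threshold: float) -> torch.Tensor:
    if threshold == 0:                 # 0 disables thresholding
        return latents
    return latents.clamp(-threshold, threshold)

def initial_noise(shape, perlin: float, perlin_noise_like) -> torch.Tensor:
    gaussian = torch.randn(shape)
    if perlin == 0:                    # 0 disables perlin mixing
        return gaussian
    return (1 - perlin) * gaussian + perlin * perlin_noise_like(shape)
```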

---

@ -120,7 +130,7 @@ internet. In the following runs, it will load up the cached versions of the requ

`.cache` directory of the system.

```bash
(invokeai) ~/stable-diffusion$ python3 ./scripts/preload_models.py
preloading bert tokenizer...
Downloading: 100%|██████████████████████████████████| 28.0/28.0 [00:00<00:00, 49.3kB/s]
Downloading: 100%|██████████████████████████████████| 226k/226k [00:00<00:00, 2.79MB/s]

@ -25,14 +25,16 @@ implementations.

Consider this image:

<div align="center" markdown>
![curly_woman](../assets/outpainting/curly.png)
</div>

Pretty nice, but it's annoying that the top of her head is cut
off. She's also a bit off center. Let's fix that!

```bash
invoke> !fix images/curly.png --outcrop top 64 right 64
```

This is saying to apply the `outcrop` extension by extending the top
of the image by 64 pixels, and the right of the image by the same

@ -42,7 +44,9 @@ specify any number of pixels to extend. You can also abbreviate

The result looks like this:

<div align="center" markdown>
![curly_woman_outcrop](../assets/outpainting/curly-outcrop.png)
</div>

The new image is actually slightly larger than the original (576x576,
because 64 pixels were added to the top and right sides.)

@ -66,33 +70,36 @@ The `outpaint` extension does the same thing, but with subtle

differences. Starting with the same image, here is how we would add an
additional 64 pixels to the top of the image:

```bash
invoke> !fix images/curly.png --out_direction top 64
```

(You can abbreviate `--out_direction` as `-D`.)

The result is shown here:

<div align="center" markdown>
![curly_woman_outpaint](../assets/outpainting/curly-outpaint.png)
</div>

Although the effect is similar, there are significant differences from
outcropping:

- You can only specify one direction to extend at a time.
- The image is **not** resized. Instead, the image is shifted by the specified
  number of pixels. If you look carefully, you'll see that less of the lady's
  torso is visible in the image.
- Because the image dimensions remain the same, there's no rounding
  to multiples of 64.
- Attempting to outpaint larger areas will frequently give rise to ugly
  ghosting effects.
- For best results, try increasing the step number.
- If you don't specify a pixel value in `-D`, it will default to half
  of the whole image, which is likely not what you want.

!!! tip

    Neither `outpaint` nor `outcrop` are perfect, but we continue to tune
    and improve them. If one doesn't work, try the other. You may also
    wish to experiment with other `img2img` arguments, such as `-C`, `-f`
    and `-s`.

@ -1,8 +1,9 @@

---
title: Postprocessing
---

# :material-image-edit: Postprocessing

## Intro

This extension provides the ability to restore faces and upscale

@ -33,13 +34,13 @@ work. These are loaded when you run `scripts/preload_models.py`. If

GFPGAN is failing with an error, please run the following from the
InvokeAI directory:

```bash
python scripts/preload_models.py
```

If you do not run this script in advance, the GFPGAN module will attempt
to download the model files the first time you try to perform facial
reconstruction.

Alternatively, if you have GFPGAN installed elsewhere, or if you are
using an earlier version of this package which asked you to install

@ -88,13 +89,13 @@ too.

### Example Usage

```bash
invoke> "superman dancing with a panda bear" -U 2 0.6 -G 0.4
```

This also works with img2img:

```bash
invoke> "a man wearing a pineapple hat" -I path/to/your/file.png -U 2 0.5 -G 0.6
```

!!! note

@ -122,20 +123,20 @@ In order to setup CodeFormer to work, you need to download the models

like with GFPGAN. You can do this either by running
`preload_models.py` or by manually downloading the [model
file](https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth)
and saving it to `ldm/invoke/restoration/codeformer/weights` folder.

You can use the `-ft` prompt argument to swap between CodeFormer and the
default GFPGAN. The above-mentioned `-G` prompt argument will allow
you to control the strength of the restoration effect.

### Usage

The following command will perform face restoration with CodeFormer instead of
the default gfpgan.

`<prompt> -G 0.8 -ft codeformer`

### Other Options

- `-cf` - cf or CodeFormer Fidelity takes values between `0` and `1`. 0 produces
  high quality results but low accuracy and 1 produces lower quality results but

@ -161,7 +162,7 @@ previously-generated file. Just use the syntax `!fix path/to/file.png

2X for a file named `./outputs/img-samples/000044.2945021133.png`,
just run:

```bash
invoke> !fix ./outputs/img-samples/000044.2945021133.png -G 0.8 -U 2
```

@ -169,7 +170,7 @@ A new file named `000044.2945021133.fixed.png` will be created in the output

directory. Note that the `!fix` command does not replace the original file,
unlike the behavior at generate time.

### Disabling

If, for some reason, you do not wish to load the GFPGAN and/or ESRGAN libraries,
you can disable them on the invoke.py command line with the `--no_restore` and

@ -1,8 +1,8 @@

---
title: Prompting-Features
---

# :octicons-command-palette-24: Prompting-Features

## **Reading Prompts from a File**

@ -19,14 +19,15 @@ innovative packaging for a squid's dinner -S137038382

Then pass this file's name to `invoke.py` when you invoke it:

```bash
(invokeai) ~/stable-diffusion$ python3 scripts/invoke.py --from_file "path/to/prompts.txt"
```

You may read a series of prompts from standard input by providing a filename of `-`:

```bash
(invokeai) ~/stable-diffusion$ echo "a beautiful day" | python3 scripts/invoke.py --from_file -
```

---

## **Negative and Unconditioned Prompts**

@ -34,7 +35,7 @@ You may read a series of prompts from standard input by providing a filename of

Any words between a pair of square brackets will instruct Stable
Diffusion to attempt to ban the concept from the generated image.

```text
this is a test prompt [not really] to make you understand [cool] how this works.
```
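
Mechanically, the bracketed words end up steering the *unconditioned* side of classifier-free guidance, which the sampler is pushed away from. A hedged sketch of the idea (illustrative Python; `denoise` and the embedding arguments are placeholders, not InvokeAI functions):

```python
# Hedged illustration of classifier-free guidance with a negative prompt.
def guided_prediction(denoise, latents, t, prompt_emb, negative_emb, cfg_scale):
    uncond = denoise(latents, t, negative_emb)  # steered *away* from this
    cond = denoise(latents, t, prompt_emb)      # steered *toward* this
    return uncond + cfg_scale * (cond - uncond)

# In spirit, words such as [woman blue saddle] are folded into negative_emb,
# so whatever they describe is suppressed in the result.
```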

@ -46,25 +47,33 @@ original prompt:

`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`

<div align="center" markdown>
![step1](../assets/negative_prompt_walkthru/step1.png)
</div>

That image has a woman, so if we want the horse without a rider, we can influence the image not to have a woman by putting [woman] in the prompt, like this:

`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`

<div align="center" markdown>
![step2](../assets/negative_prompt_walkthru/step2.png)
</div>

That's nice - but say we also don't want the image to be quite so blue. We can add "blue" to the list of negative prompts, so it's now [woman blue]:

`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`

<div align="center" markdown>
![step3](../assets/negative_prompt_walkthru/step3.png)
</div>

Getting close - but there's no sense in having a saddle when our horse doesn't have a rider, so we'll add one more negative prompt: [woman blue saddle].

`#!bash "A fantastical translucent poney made of water and foam, ethereal, radiant, hyperalism, scottish folklore, digital painting, artstation, concept art, smooth, 8 k frostbite 3 engine, ultra detailed, art by artgerm and greg rutkowski and magali villeneuve [woman blue saddle]" -s 20 -W 512 -H 768 -C 7.5 -A k_euler_a -S 1654590180`

<div align="center" markdown>
![step4](../assets/negative_prompt_walkthru/step4.png)
</div>

!!! notes "Notes about this feature:"

@ -101,44 +110,58 @@ illustrate, here are three images generated using various combinations

of blend weights. As usual, unless you fix the seed, the prompts will give you
different results each time you run them.
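
A hedged sketch of what those blend weights do: each sub-prompt is embedded separately and the embeddings are combined as a weighted average before sampling (illustrative PyTorch; `embed` stands in for the CLIP text encoder and is not a real InvokeAI function):

```python
# Hedged illustration of prompt blending via a weighted average of embeddings.
import torch

def blend_conditionings(embeddings: list[torch.Tensor], weights: list[float]) -> torch.Tensor:
    total = sum(weights)
    blended = torch.zeros_like(embeddings[0])
    for emb, weight in zip(embeddings, weights):
        blended += (weight / total) * emb
    return blended

# e.g. blend_conditionings([embed("blue sphere"), embed("red cube")], [0.25, 0.75])
```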

---

<div align="center" markdown>
### "blue sphere, red cube, hybrid"
</div>

This example doesn't use melding at all and represents the default way
of mixing concepts.

<div align="center" markdown>
![blue-sphere-red-cube-hybrid](../assets/prompt-blending/blue-sphere-red-cube-hybrid.png)
</div>

It's interesting to see how the AI expressed the concept of "cube" as
the four quadrants of the enclosing frame. If you look closely, there
is depth there, so the enclosing frame is actually a cube.

<div align="center" markdown>
### "blue sphere:0.25 red cube:0.75 hybrid"

![blue-sphere-25-red-cube-75](../assets/prompt-blending/blue-sphere-0.25-red-cube-0.75-hybrid.png)
</div>

Now that's interesting. We get neither a blue sphere nor a red cube,
but a red sphere embedded in a brick wall, which represents a melding
of concepts within the AI's "latent space" of semantic
representations. Where is Ludwig Wittgenstein when you need him?

<div align="center" markdown>
### "blue sphere:0.75 red cube:0.25 hybrid"

![blue-sphere-75-red-cube-25](../assets/prompt-blending/blue-sphere-0.75-red-cube-0.25-hybrid.png)
</div>

Definitely more blue-spherey. The cube is gone entirely, but it's
really cool abstract art.

<div align="center" markdown>
### "blue sphere:0.5 red cube:0.5 hybrid"

![blue-sphere-5-red-cube-5-hybrid](../assets/prompt-blending/blue-sphere-0.5-red-cube-0.5-hybrid.png)
</div>

Whoa...! I see blue and red, but no spheres or cubes. Is the word
"hybrid" summoning up the concept of some sort of scifi creature?
Let's find out.

<div align="center" markdown>
### "blue sphere:0.5 red cube:0.5"

![blue-sphere-5-red-cube-5](../assets/prompt-blending/blue-sphere-0.5-red-cube-0.5.png)
</div>

Indeed, removing the word "hybrid" produces an image that is more like
what we'd expect.

@ -146,4 +169,3 @@ what we'd expect.

In conclusion, prompt blending is great for exploring creative space,
but can be difficult to direct. A forthcoming release of InvokeAI will
feature more deterministic prompt weighting.

@ -1,8 +1,8 @@

---
title: Textual-Inversion
---

# :material-file-document: Textual Inversion

## **Personalizing Text-to-Image Generation**

@ -23,13 +23,13 @@ As the default backend is not available on Windows, if you're using that

platform, set the environment variable `PL_TORCH_DISTRIBUTED_BACKEND` to `gloo`.

```bash
python3 ./main.py -t \
    --base ./configs/stable-diffusion/v1-finetune.yaml \
    --actual_resume ./models/ldm/stable-diffusion-v1/model.ckpt \
    -n my_cat \
    --gpus 0 \
    --data_root D:/textual-inversion/my_cat \
    --init_word 'cat'
```

During the training process, files will be created in

@ -59,7 +59,8 @@ Once the model is trained, specify the trained .pt or .bin file when starting

invoke using

```bash
python3 ./scripts/invoke.py \
    --embedding_path /path/to/embedding.pt
```

Then, to utilize your subject at the invoke prompt

@ -80,9 +81,9 @@ LDM checkpoints using:

```bash
python3 ./scripts/merge_embeddings.py \
    --manager_ckpts /path/to/first/embedding.pt \
    [</path/to/second/embedding.pt>,[...]] \
    --output_path /path/to/output/embedding.pt
```

Credit goes to rinongal and the repository

@ -25,10 +25,11 @@ variations to create the desired image of Xena, Warrior Princess.

## Step 1 -- Find a base image that you like

The prompt we will use throughout is:

`#!bash "lucy lawless as xena, warrior princess, character portrait, high resolution."`

This will be indicated as `#!bash "prompt"` in the examples below.

First we let SD create a series of images in the usual way, in this case
requesting six iterations:

@ -45,7 +46,10 @@ Outputs:

./outputs/Xena/000001.3357757885.png: "prompt" -s50 -W512 -H512 -C7.5 -Ak_lms -S3357757885
```

<figure markdown>
![var1](../assets/variation_walkthru/000001.3357757885.png)
<figcaption> Seed 3357757885 looks nice </figcaption>
</figure>

---

@ -77,9 +81,15 @@ used to generate it.

This gives us a series of closely-related variations, including the two shown
here.

<figure markdown>
![var2](../assets/variation_walkthru/000002.3647897225.png)
<figcaption>subseed 3647897225</figcaption>
</figure>

<figure markdown>
![var3](../assets/variation_walkthru/000002.1614299449.png)
<figcaption>subseed 1614299449</figcaption>
</figure>

I like the expression on Xena's face in the first one (subseed 3647897225), and
the armor on her shoulder in the second one (subseed 1614299449). Can we combine

@ -97,7 +107,10 @@ Outputs:

Here we are providing equal weights (0.1 and 0.1) for both the subseeds. The
resulting image is close, but not exactly what I wanted:

<figure markdown>
![var4](../assets/variation_walkthru/000003.1614299449.png)
<figcaption> subseed 1614299449 </figcaption>
</figure>

We could either try combining the images with different weights, or we can
generate more variations around the almost-but-not-quite image. We do the

@ -118,8 +131,23 @@ Outputs:

This produces six images, all slight variations on the combination of the chosen
two images. Here's the one I like best:

<figure markdown>
![var5](../assets/variation_walkthru/000004.3747154981.png)
<figcaption> subseed 3747154981 </figcaption>
</figure>

As you can see, this is a very powerful tool, which when combined with subprompt
weighting, gives you great control over the content and quality of your
generated images.
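
For intuition, here is a hedged sketch of how `-V subseed:weight` variations can be thought of: each subseed contributes its own noise tensor, blended into the base seed's noise by its weight (illustrative PyTorch; the real implementation may blend differently, for example with spherical interpolation):

```python
# Hedged illustration of combining a base seed with weighted subseed noise.
import torch

def variation_noise(base_seed: int, subseeds: list[tuple[int, float]], shape) -> torch.Tensor:
    noise = torch.randn(shape, generator=torch.Generator().manual_seed(base_seed))
    for subseed, weight in subseeds:
        variant = torch.randn(shape, generator=torch.Generator().manual_seed(subseed))
        noise = (1 - weight) * noise + weight * variant
    return noise

# e.g. variation_noise(3357757885, [(3647897225, 0.1), (1614299449, 0.1)], (1, 4, 64, 64))
```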

## Variations and Samplers

The sampler you choose has a strong effect on variation strength. Some
samplers, such as `k_euler_a`, are very "creative" and produce significant
amounts of image-to-image variation even when the seed is fixed and the
`-v` argument is very low. Others are more deterministic. Feel free to
experiment until you find the combination that you like.

Also be aware of the [Perlin Noise](OTHER.md#thresholding-and-perlin-noise-initialization-options)
feature, which provides another way of introducing variability into your
image generation requests.

@ -2,12 +2,14 @@

title: InvokeAI Web Server
---

# :material-web: InvokeAI Web Server

As of version 2.0.0, this distribution comes with a full-featured web
server (see screenshot). To use it, run the `invoke.py` script by
adding the `--web` option:

```bash
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py --web
```

You can then connect to the server by pointing your web browser at

@ -17,7 +19,7 @@ either the IP address of the host you are running it on, or the

wildcard `0.0.0.0`. For example:

```bash
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py --web --host 0.0.0.0
```

# Quick guided walkthrough of the WebGUI's features

@ -25,7 +27,7 @@ wildcard `0.0.0.0`. For example:

While most of the WebGUI's features are intuitive, here is a guided
walkthrough of its various components.

![Invoke Web Server - Major Components](../assets/invoke-web-server-1.png){:width="640px"}

The screenshot above shows the Text to Image tab of the WebGUI. There
are three main sections:

@ -53,7 +55,9 @@ There are also a series of icons to the left of the control panel (see

highlighted area in the screenshot below) which select among a series
of tabs for performing different types of operations.

<figure markdown>
![Invoke Web Server - Control Panel](../assets/invoke-web-server-2.png){:width="512px"}
</figure>

From top to bottom, these are:

@ -86,51 +90,51 @@ using its IP address or domain name.

#### Basics

1. Generate an image by typing *strawberry sushi* into the large
prompt field on the upper left and then clicking on the Invoke button
(the one with the Camera icon). After a short wait, you'll see a large
image of sushi in the image panel, and a new thumbnail in the gallery
on the right.

    If you need more room on the screen, you can turn the gallery off
    by clicking on the **x** to the right of "Your Invocations". You can
    turn it back on later by clicking the image icon that appears in the
    gallery's place.

    The images are written into the directory indicated by the `--outdir`
    option provided at script launch time. By default, this is
    `outputs/img-samples` under the InvokeAI directory.

2. Generate a bunch of strawberry sushi images by increasing the
number of requested images by adjusting the Images counter just below
the Camera button. As each is generated, it will be added to the
gallery. You can switch the active image by clicking on the gallery
thumbnails.

3. Try playing with different settings, including image width and
height, the Sampler, the Steps and the CFG scale.

    Image *Width* and *Height* do what you'd expect. However, be aware that
    larger images consume more VRAM memory and take longer to generate.

    The *Sampler* controls how the AI selects the image to display. Some
    samplers are more "creative" than others and will produce a wider
    range of variations (see next section). Some samplers run faster than
    others.

    *Steps* controls how many noising/denoising/sampling steps the AI will
    take. The higher this value, the more refined the image will be, but
    the longer the image will take to generate. A typical strategy is to
    generate images with a low number of steps in order to select one to
    work on further, and then regenerate it using a higher number of
    steps.

    The *CFG Scale* controls how hard the AI tries to match the generated
    image to the input prompt. You can go as high or low as you like, but
    generally values greater than 20 won't improve things much, and values
    lower than 5 will produce unexpected images. There are complex
    interactions between *Steps*, *CFG Scale* and the *Sampler*, so
    experiment to find out what works for you.

6. To regenerate a previously-generated image, select the image you
want and click *Use All*. This loads the text prompt and other

@ -138,8 +142,8 @@ original settings into the control panel. If you then press *Invoke*

it will regenerate the image exactly. You can also selectively modify
the prompt or other settings to tweak the image.

    Alternatively, you may click on *Use Seed* to load just the image's
    seed, and leave other settings unchanged.

7. To regenerate a Stable Diffusion image that was generated by
another SD package, you need to know its text prompt and its

#### Variations on a theme

1. Let's try generating some variations. Select your favorite sushi
image from the gallery to load it. Then select "Use All" from the list
of buttons above. This will load up all the settings used to generate
this image, including its unique seed.

    Go down to the Variations section of the Control Panel and set the
    button to On. Set Variation Amount to 0.2 to generate a modest
    number of variations on the image, and also set the Image counter to
    `4`. Press the `invoke` button. This will generate a series of related
    images. To obtain smaller variations, just lower the Variation
    Amount. You may also experiment with changing the Sampler. Some
    samplers generate more variability than others. *k_euler_a* is
    particularly creative, while *ddim* is pretty conservative.

2. For even more variations, experiment with increasing the setting
for *Perlin*. This adds a bit of noise to the image generation
process. Note that values of Perlin noise greater than 0.15 produce
poor images for several of the samplers.

@ -179,7 +183,7 @@ particular issues with generating reallistic eyes. InvokeAI provides

the ability to reconstruct faces using either the GFPGAN or CodeFormer
libraries. For more information see [POSTPROCESS](POSTPROCESS.md).

1. Invoke a prompt that generates a mangled face. A prompt that often
gives this is "portrait of a lawyer, 3/4 shot" (this is not intended
as a slur against lawyers!) Once you have an image that needs some
touching up, load it into the Image panel, and press the button with

@ -188,15 +192,16 @@ box will appear. Leave *Strength* at 0.8 and press *Restore Faces". If

all goes well, the eyes and other aspects of the face will be improved
(see the second screenshot)

    ![Invoke Web Server - Original Image](../assets/invoke-web-server-3.png)

    ![Invoke Web Server - Retouched Image](../assets/invoke-web-server-4.png)

    The facial reconstruction *Strength* field adjusts how aggressively
    the face library will try to alter the face. It can be as high as 1.0,
    but be aware that this often softens the face airbrush style, losing
    some details. The default 0.8 is usually sufficient.

2. "Upscaling" is the process of increasing the size of an image while
retaining the sharpness. InvokeAI uses an external library called
"ESRGAN" to do this. To invoke upscaling, simply select an image and
press the *HD* button above it. You can select between 2X and 4X

@ -204,7 +209,7 @@ upscaling, and adjust the upscaling strength, which has much the same

meaning as in facial reconstruction. Try running this on one of your
previously-generated images.

3. Finally, you can run facial reconstruction and/or upscaling
automatically after each Invocation. Go to the Advanced Options
section of the Control Panel and turn on *Restore Face* and/or
*Upscale*.
@ -222,28 +227,32 @@ and
|
||||
[Lincoln-and-Parrot-512-transparent.png](../assets/Lincoln-and-Parrot-512-transparent.png).
|
||||
Download these images to your local machine now to continue with the walkthrough.
|
||||
|
||||
10. Click on the *Image to Image* tab icon, which is the second icon
|
||||
1. Click on the *Image to Image* tab icon, which is the second icon
|
||||
from the top on the left-hand side of the screen:
|
||||
|
||||
<img src="../assets/invoke-web-server-5.png">
|
||||
<figure markdown>
|
||||
![Invoke Web Server - Image to Image Icon](../assets/invoke-web-server-5.png)
|
||||
</figure>
|
||||
|
||||
This will bring you to a screen similar to the one shown here:
|
||||
This will bring you to a screen similar to the one shown here:
|
||||
|
||||
<img src="../assets/invoke-web-server-6.png" width=640>
|
||||
<figure markdown>
|
||||
![Invoke Web Server - Image to Image Tab](../assets/invoke-web-server-6.png){:width="640px"}
|
||||
</figure>
|
||||
|
||||
Drag-and-drop the Lincoln-and-Parrot image into the Image panel, or
|
||||
2. Drag-and-drop the Lincoln-and-Parrot image into the Image panel, or
|
||||
click the blank area to get an upload dialog. The image will load into
|
||||
an area marked *Initial Image*. (The WebGUI will also load the most
|
||||
recently-generated image from the gallery into a section on the left,
|
||||
but this image will be replaced in the next step.)
|
||||
|
||||
11. Go to the prompt box and type *old sea captain with raven on
|
||||
3. Go to the prompt box and type *old sea captain with raven on
|
||||
shoulder* and press Invoke. A derived image will appear to the right
|
||||
of the original one:
|
||||
|
||||
<img src="../assets/invoke-web-server-7.png" width=640>
|
||||
![Invoke Web Server - Image to Image example](../assets/invoke-web-server-7.png){:width="640px"}
|
||||
|
||||
12. Experiment with the different settings. The most influential one
|
||||
4. Experiment with the different settings. The most influential one
|
||||
in Image to Image is *Image to Image Strength* located about midway
|
||||
down the control panel. By default it is set to 0.75, but can range
|
||||
from 0.0 to 0.99. The higher the value, the more of the original image
|
||||
@ -253,7 +262,7 @@ the Sampler and CFG Scale also influence the final result. You can
|
||||
also generate variations in the same way as described in Text to
|
||||
Image.
|
||||
|
||||
13. What if we only want to change certain part(s) of the image and
|
||||
5. What if we only want to change certain part(s) of the image and
|
||||
leave the rest intact? This is called Inpainting, and a future version
|
||||
of the InvokeAI web server will provide an interactive painting canvas
|
||||
on which you can directly draw the areas you wish to Inpaint into. For
|
||||
@ -261,16 +270,30 @@ now, you can achieve this effect by using an external photoeditor tool
|
||||
to make one or more regions of the image transparent as described in
|
||||
[INPAINTING.md](INPAINTING.md) and uploading that.
|
||||
|
||||
The file
|
||||
[Lincoln-and-Parrot-512-transparent.png](../assets/Lincoln-and-Parrot-512-transparent.png)
|
||||
is a version of the earlier image in which the area around the parrot
|
||||
has been replaced with transparency. Click on the "x" in the upper
|
||||
right of the Initial Image and upload the transparent version. Using
|
||||
the same prompt "old sea captain with raven on shoulder" try Invoking
|
||||
an image. This time, only the parrot will be replaced, leaving the
|
||||
rest of the original image intact:
|
||||
The file
|
||||
[Lincoln-and-Parrot-512-transparent.png](../assets/Lincoln-and-Parrot-512-transparent.png)
|
||||
is a version of the earlier image in which the area around the parrot
|
||||
has been replaced with transparency. Click on the "x" in the upper
|
||||
right of the Initial Image and upload the transparent version. Using
|
||||
the same prompt "old sea captain with raven on shoulder" try Invoking
|
||||
an image. This time, only the parrot will be replaced, leaving the
|
||||
rest of the original image intact:
|
||||
|
||||
<img src="../assets/invoke-web-server-8.png" width=640>
|
||||
<figure markdown>
|
||||
![Invoke Web Server - Inpainting](../assets/invoke-web-server-8.png){:width="640px"}
|
||||
</figure>
|
||||
|
||||
6. Would you like to modify a previously-generated image using the
|
||||
Image to Image facility? Easy! While in the Image to Image panel,
|
||||
hover over any of the gallery images to see a little menu of icons pop
|
||||
up. Click the picture icon to instantly send the selected image to
|
||||
Image to Image as the initial image.
|
||||
|
||||
You can do the same from the Text to Image tab by clicking on the
|
||||
picture icon above the central image panel. The screenshot below
|
||||
shows where the "use as initial image" icons are located.
|
||||
|
||||
![Invoke Web Server - Use as Image Links](../assets/invoke-web-server-9.png){:width="640px"}
|
||||
|
||||
## Parting remarks
|
||||
|
||||
@ -282,53 +305,54 @@ were not covered here.
|
||||
The WebGUI is under rapid development. Check back regularly for
|
||||
updates!
|
||||
|
||||
# Reference
|
||||
## Reference
|
||||
|
||||
## Additional Options
|
||||
`--web_develop` - Starts the web server in development mode.
|
||||
|
||||
`--web_verbose` - Enables verbose logging
|
||||
|
||||
`--cors [CORS ...]` - Additional allowed origins, comma-separated
|
||||
|
||||
`--host HOST` - Web server: Host or IP to listen on. Set to 0.0.0.0 to
|
||||
accept traffic from other devices on your network.
|
||||
|
||||
`--port PORT` - Web server: Port to listen on
|
||||
|
||||
`--gui` - Start InvokeAI GUI - This is the "desktop mode" version of the web app. It uses Flask
|
||||
to create a desktop app experience of the webserver.
|
||||
### Additional Options
|
||||
|
||||
parameter <img width=160 align="right"> | effect
|
||||
-- | --
|
||||
`--web_develop` | Starts the web server in development mode.
|
||||
`--web_verbose` | Enables verbose logging
|
||||
`--cors [CORS ...]` | Additional allowed origins, comma-separated
|
||||
`--host HOST` | Web server: Host or IP to listen on. Set to 0.0.0.0 to accept traffic from other devices on your network.
|
||||
`--port PORT` | Web server: Port to listen on
|
||||
`--gui` | Start InvokeAI GUI - This is the "desktop mode" version of the web app. It uses Flask to create a desktop app experience of the webserver.
|
||||
|
||||
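For example, several of these options can be combined on one launch line (`--web` itself is the switch that starts the web server, as described elsewhere in these docs). The exact script path depends on how you installed InvokeAI, so treat this as a sketch:

```bash
# Serve the WebGUI on all interfaces, on port 9090, allowing one extra CORS origin
(invokeai) ~/InvokeAI$ python scripts/invoke.py --web --host 0.0.0.0 --port 9090 --cors http://localhost:3000
```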
## Web Specific Features
|
||||
### Web Specific Features
|
||||
|
||||
The web interface offers an incredibly easy-to-use way of interacting with the InvokeAI toolkit.
|
||||
For detailed guidance on individual features, see the Feature-specific help documents available in this directory.
|
||||
Note that the latest functionality available in the CLI may not always be available in the Web interface.
|
||||
|
||||
### Dark Mode & Light Mode
|
||||
#### Dark Mode & Light Mode
|
||||
|
||||
The InvokeAI interface is available in a nano-carbon black & purple Dark Mode, and a "burn your eyes out Nosferatu" Light Mode. These can be toggled by clicking the Sun/Moon icons at the top right of the interface.
|
||||
|
||||
![InvokeAI Web Server - Dark Mode](../assets/invoke_web_dark.png)
|
||||
|
||||
![InvokeAI Web Server - Light Mode](../assets/invoke_web_light.png)
|
||||
|
||||
### Invocation Toolbar
|
||||
The left side of the InvokeAI interface is available for customizing the prompt and the settings used for invoking your new image. Typing your prompt into the open text field and clicking the Invoke button will produce the image based on the settings configured in the toolbar.
|
||||
#### Invocation Toolbar
|
||||
|
||||
The left side of the InvokeAI interface is available for customizing the prompt and the settings used for invoking your new image. Typing your prompt into the open text field and clicking the Invoke button will produce the image based on the settings configured in the toolbar.
|
||||
|
||||
See below for additional documentation related to each feature:
|
||||
|
||||
- [Core Prompt Settings](./CLI.md)
|
||||
- [Variations](./VARIATIONS.md)
|
||||
- [Upscaling](./UPSCALE.md)
|
||||
- [Upscaling](./POSTPROCESS.md#upscaling)
|
||||
- [Image to Image](./IMG2IMG.md)
|
||||
- [Inpainting](./INPAINTING.md)
|
||||
- [Other](./OTHER.md)
|
||||
|
||||
### Invocation Gallery
|
||||
#### Invocation Gallery
|
||||
|
||||
The currently selected --outdir (or the default outputs folder) will display all previously generated files on load. As new invocations are generated, these will be dynamically added to the gallery, and can be previewed by selecting them. Each image also has a simple set of actions (e.g., Delete, Use Seed, Use All Parameters, etc.) that can be accessed by hovering over the image.
|
||||
|
||||
### Image Workspace
|
||||
#### Image Workspace
|
||||
|
||||
When an image from the Invocation Gallery is selected, or is generated, the image will be displayed within the center of the interface. A quickbar of common image interactions is displayed along the top of the image, including:
|
||||
|
||||
- Use image in the `Image to Image` workflow
|
||||
- Initialize Face Restoration on the selected file
|
||||
- Initialize Upscaling on the selected file
|
||||
@ -337,4 +361,9 @@ When an image from the Invocation Gallery is selected, or is generated, the imag
|
||||
|
||||
## Acknowledgements
|
||||
|
||||
A huge shout-out to the core team working to make this vision a reality, including [psychedelicious](https://github.com/psychedelicious), [Kyle0654](https://github.com/Kyle0654) and [blessedcoolant](https://github.com/blessedcoolant). [hipsterusername](https://github.com/hipsterusername) was the team's unofficial cheerleader and added tooltips/docs.
|
||||
A huge shout-out to the core team working to make this vision a
|
||||
reality, including
|
||||
[psychedelicious](https://github.com/psychedelicious),
|
||||
[Kyle0654](https://github.com/Kyle0654) and
|
||||
[blessedcoolant](https://github.com/blessedcoolant). [hipsterusername](https://github.com/hipsterusername)
|
||||
was the team's unofficial cheerleader and added tooltips/docs.
|
||||
|
@ -1,8 +1,8 @@
|
||||
---
|
||||
title: SAMPLER CONVERGENCE
|
||||
title: Sampler Convergence
|
||||
---
|
||||
|
||||
## *Sampler Convergence*
|
||||
# :material-palette-advanced: *Sampler Convergence*
|
||||
|
||||
As features keep increasing, making the right choices for your needs can become increasingly difficult. What sampler to use? And for how many steps? Do you change the CFG value? Do you use prompt weighting? Do you allow variations?
|
||||
|
||||
@ -14,12 +14,14 @@ In this document, we will talk about sampler convergence.
|
||||
|
||||
Looking for a short version? Here's a TL;DR in 3 tables.
|
||||
|
||||
| Remember |
|
||||
|:---|
|
||||
| Results converge as steps (`-s`) are increased (except for `K_DPM_2_A` and `K_EULER_A`). Often at ≥ `-s100`, but may require ≥ `-s700`. |
|
||||
| Producing a batch of candidate images at low (`-s8` to `-s30`) step counts can save you hours of computation. |
|
||||
| `K_HEUN` and `K_DPM_2` converge in fewer steps (but are slower). |
|
||||
| `K_DPM_2_A` and `K_EULER_A` incorporate a lot of creativity/variability. |
|
||||
!!! note "Remember"
|
||||
|
||||
- Results converge as steps (`-s`) are increased (except for `K_DPM_2_A` and `K_EULER_A`). Often at ≥ `-s100`, but may require ≥ `-s700`.
|
||||
- Producing a batch of candidate images at low (`-s8` to `-s30`) step counts can save you hours of computation.
|
||||
- `K_HEUN` and `K_DPM_2` converge in fewer steps (but are slower).
|
||||
- `K_DPM_2_A` and `K_EULER_A` incorporate a lot of creativity/variability.
|
||||
|
||||
<div align="center" markdown>
|
||||
|
||||
| Sampler | (3 sample avg) it/s (M1 Max 64GB, 512x512) |
|
||||
|---|---|
|
||||
@ -32,10 +34,13 @@ Looking for a short version? Here's a TL;DR in 3 tables.
|
||||
| `K_DPM_2_A` | 0.95 (slower) |
|
||||
| `K_EULER_A` | 1.86 |
|
||||
|
||||
| Suggestions |
|
||||
|:---|
|
||||
| For most use cases, `K_LMS`, `K_HEUN` and `K_DPM_2` are the best choices (the latter 2 run 0.5x as quick, but tend to converge 2x as quick as `K_LMS`). At very low steps (≤ `-s8`), `K_HEUN` and `K_DPM_2` are not recommended. Use `K_LMS` instead.|
|
||||
| For variability, use `K_EULER_A` (runs 2x as quick as `K_DPM_2_A`). |
|
||||
</div>
|
||||
|
||||
!!! tip "suggestions"
|
||||
|
||||
For most use cases, `K_LMS`, `K_HEUN` and `K_DPM_2` are the best choices (the latter 2 run 0.5x as quick, but tend to converge 2x as quick as `K_LMS`). At very low steps (≤ `-s8`), `K_HEUN` and `K_DPM_2` are not recommended. Use `K_LMS` instead.
|
||||
|
||||
For variability, use `K_EULER_A` (runs 2x as quick as `K_DPM_2_A`).
|
||||
|
||||
---
|
||||
|
||||
@ -60,15 +65,15 @@ This realization is very useful because it means you don't need to create a batc
|
||||
You can produce the same 100 images at `-s10` to `-s30` using a K-sampler (since they converge faster), get a rough idea of the final result, choose your 2 or 3 favorite ones, and then run `-s100` on those images to polish some details.
|
||||
The latter technique is 3-8x as quick.
|
||||
|
||||
Example:
|
||||
!!! example
|
||||
|
||||
At 60s per 100 steps.
|
||||
At 60s per 100 steps.
|
||||
|
||||
(Option A) 60s * 100 images = 6000s (100 images at `-s100`, manually picking 3 favorites)
|
||||
A) 60s * 100 images = 6000s (100 images at `-s100`, manually picking 3 favorites)
|
||||
|
||||
(Option B) 6s * 100 images + 60s * 3 images = 780s (100 images at `-s10`, manually picking 3 favorites, and running those 3 at `-s100` to polish details)
|
||||
B) 6s * 100 images + 60s * 3 images = 780s (100 images at `-s10`, manually picking 3 favorites, and running those 3 at `-s100` to polish details)
|
||||
|
||||
The result is 1 hour and 40 minutes (Option A) vs 13 minutes (Option B).
|
||||
The result is __1 hour and 40 minutes__ for Variant A, vs __13 minutes__ for Variant B.
|
||||
|
||||
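As a concrete sketch of that workflow with the command-line client (placeholder prompt and seed; `-n`, `-s`, `-S` and `-A` are the iteration, step, seed and sampler switches as documented for the CLI):

```bash
# Pass 1: 100 rough candidates at 10 steps with a fast-converging K-sampler
invoke> "a lighthouse in a storm" -n100 -s10 -A k_lms

# Pass 2: re-run only a favorite at 100 steps, re-using its seed to polish the details
invoke> "a lighthouse in a storm" -S 3423123 -s100 -A k_lms
```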
### *Topic convergence*
|
||||
|
||||
@ -110,9 +115,12 @@ Note also the point of convergence may not be the most desirable state (e.g. I p
|
||||
|
||||
Once we understand the concept of sampler convergence, we must look into the performance of each sampler in terms of steps (iterations) per second, as not all samplers run at the same speed.
|
||||
|
||||
On my M1 Max with 64GB of RAM, for a 512x512 image:
|
||||
| Sampler | (3 sample average) it/s |
|
||||
|---|---|
|
||||
<div align="center" markdown>
|
||||
|
||||
On my M1 Max with 64GB of RAM, for a 512x512 image
|
||||
|
||||
| Sampler | (3 sample average) it/s |
|
||||
| :--- | :--- |
|
||||
| `DDIM` | 1.89 |
|
||||
| `PLMS` | 1.86 |
|
||||
| `K_EULER` | 1.86 |
|
||||
@ -122,11 +130,13 @@ On my M1 Max with 64GB of RAM, for a 512x512 image:
|
||||
| `K_DPM_2_A` | 0.95 (slower) |
|
||||
| `K_EULER_A` | 1.86 |
|
||||
|
||||
</div>
|
||||
|
||||
Combining our results with the steps per second of each sampler, three choices come out on top: `K_LMS`, `K_HEUN` and `K_DPM_2` (where the latter two run 0.5x as quick but tend to converge 2x as quick as `K_LMS`). For creativity and a lot of variation between iterations, `K_EULER_A` can be a good choice (which runs 2x as quick as `K_DPM_2_A`).
|
||||
|
||||
Additionally, image generation at very low steps (≤ `-s8`) is not recommended for `K_HEUN` and `K_DPM_2`. Use `K_LMS` instead.
|
||||
|
||||
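In CLI terms, the recommendation looks roughly like this (a sketch; sampler names and the `-A` switch as documented for the CLI, with a placeholder prompt):

```bash
# Fast-converging choices for final renders
invoke> "a watercolor landscape" -A k_heun -s30
invoke> "a watercolor landscape" -A k_lms -s60

# Maximum creativity/variability between images
invoke> "a watercolor landscape" -A k_euler_a -s40 -n4
```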
<img width="397" alt="192044949-67d5d441-a0d5-4d5a-be30-5dda4fc28a00-min" src="https://user-images.githubusercontent.com/50542132/192046823-2714cb29-bbf3-4eb1-9213-e27a0963905c.png">
|
||||
![K-compare](https://user-images.githubusercontent.com/50542132/192046823-2714cb29-bbf3-4eb1-9213-e27a0963905c.png){ width=600}
|
||||
|
||||
### *Three key points*
|
||||
|
||||
|
@ -1,5 +1,7 @@
|
||||
---
|
||||
title: F.A.Q.
|
||||
hide:
|
||||
- toc
|
||||
---
|
||||
|
||||
# :material-frequently-asked-questions: F.A.Q.
|
||||
@ -63,7 +65,7 @@ Reinstall the stable diffusion modules. Enter the `stable-diffusion` directory a
|
||||
|
||||
### **QUESTION**
|
||||
|
||||
`invoke.py` dies, complaining of various missing modules, none of which starts with `ldm``.
|
||||
`invoke.py` dies, complaining of various missing modules, none of which starts with `ldm`.
|
||||
|
||||
### **SOLUTION**
|
||||
|
||||
@ -87,9 +89,7 @@ Usually this will be sufficient, but if you start to see errors about
|
||||
missing or incorrect modules, use the command `pip install -e .`
|
||||
and/or `conda env update` (These commands won't break anything.)
|
||||
|
||||
`pip install -e .` and/or
|
||||
|
||||
`conda env update -f environment.yaml`
|
||||
`pip install -e .` and/or `conda env update -f environment.yaml`
|
||||
|
||||
(These commands won't break anything.)
|
||||
|
||||
|
@ -1,6 +1,5 @@
|
||||
---
|
||||
title: Home
|
||||
template: main.html
|
||||
---
|
||||
|
||||
<!--
|
||||
@ -13,7 +12,7 @@ template: main.html
|
||||
-->
|
||||
<div align="center" markdown>
|
||||
|
||||
# :material-script-text-outline: Stable Diffusion Dream Script
|
||||
# ^^**InvokeAI: A Stable Diffusion Toolkit**^^ :tools: <br> <small>Formerly known as lstein/stable-diffusion</small>
|
||||
|
||||
![project logo](assets/logo.png)
|
||||
|
||||
@ -29,8 +28,8 @@ template: main.html
|
||||
[CI checks on dev link]: https://github.com/invoke-ai/InvokeAI/actions?query=branch%3Adevelopment
|
||||
[CI checks on main badge]: https://flat.badgen.net/github/checks/invoke-ai/InvokeAI/main?label=CI%20status%20on%20main&cache=900&icon=github
|
||||
[CI checks on main link]: https://github.com/invoke-ai/InvokeAI/actions/workflows/test-invoke-conda.yml
|
||||
[discord badge]: https://flat.badgen.net/discord/members/htRgbc7e?icon=discord
|
||||
[discord link]: https://discord.com/invite/htRgbc7e
|
||||
[discord badge]: https://flat.badgen.net/discord/members/ZmtBAhwWhy?icon=discord
|
||||
[discord link]: https://discord.gg/ZmtBAhwWhy
|
||||
[github forks badge]: https://flat.badgen.net/github/forks/invoke-ai/InvokeAI?icon=github
|
||||
[github forks link]: https://useful-forks.github.io/?repo=lstein%2Fstable-diffusion
|
||||
[github open issues badge]: https://flat.badgen.net/github/open-issues/invoke-ai/InvokeAI?icon=github
|
||||
@ -46,16 +45,20 @@ template: main.html
|
||||
|
||||
</div>
|
||||
|
||||
This is a fork of [CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion), the open
|
||||
source text-to-image generator. It provides a streamlined process with various new features and
|
||||
options to aid the image generation process. It runs on Windows, Mac and Linux machines, and runs on
|
||||
GPU cards with as little as 4 GB of RAM.
|
||||
<a href="https://github.com/invoke-ai/InvokeAI">InvokeAI</a> is an
|
||||
implementation of Stable Diffusion, the open source text-to-image and
|
||||
image-to-image generator. It provides a streamlined process with
|
||||
various new features and options to aid the image generation
|
||||
process. It runs on Windows, Mac and Linux machines, and runs on GPU
|
||||
cards with as little as 4 GB of RAM.
|
||||
|
||||
**Quick links**: [<a href="https://discord.gg/NwVCmKwY">Discord Server</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
|
||||
|
||||
<div align="center"><img src="assets/invoke-web-server-1.png" width=640></div>
|
||||
|
||||
!!! note
|
||||
|
||||
This fork is rapidly evolving. Please use the
|
||||
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
|
||||
requests. Be sure to use the provided templates. They will help us diagnose issues faster.
|
||||
This fork is rapidly evolving. Please use the [Issues tab](https://github.com/invoke-ai/InvokeAI/issues) to report bugs and make feature requests. Be sure to use the provided templates. They will help us diagnose issues faster.
|
||||
|
||||
## :octicons-package-dependencies-24: Installation
|
||||
|
||||
@ -81,7 +84,7 @@ You will need one of the following:
|
||||
|
||||
### :fontawesome-regular-hard-drive: Disk
|
||||
|
||||
- At least 6 GB of free disk space for the machine learning model, Python, and all its dependencies.
|
||||
- At least 12 GB of free disk space for the machine learning model, Python, and all its dependencies.
|
||||
|
||||
!!! note
|
||||
|
||||
@ -93,13 +96,34 @@ You wil need one of the following:
|
||||
To run in full-precision mode, start `invoke.py` with the `--full_precision` flag:
|
||||
|
||||
```bash
|
||||
(ldm) ~/stable-diffusion$ python scripts/invoke.py --full_precision
|
||||
(invokeai) ~/InvokeAI$ python scripts/invoke.py --full_precision
|
||||
```
|
||||
## :octicons-log-16: Latest Changes
|
||||
|
||||
### vNEXT <small>(TODO 2022)</small>
|
||||
|
||||
- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
|
||||
### v2.0.0 <small>(9 October 2022)</small>
|
||||
- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
|
||||
for backward compatibility.
|
||||
- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
|
||||
- Support for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/INPAINTING.md">inpainting</a> and <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OUTPAINTING.md">outpainting</a>
|
||||
- img2img runs on all k* samplers
|
||||
- Support for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/PROMPTS.md#negative-and-unconditioned-prompts">negative prompts</a>
|
||||
- Support for CodeFormer face reconstruction
|
||||
- Support for Textual Inversion on Macintoshes
|
||||
- Support in both WebGUI and CLI for <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/POSTPROCESS.md">post-processing of previously-generated images</a>
|
||||
using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
|
||||
and "embiggen" upscaling. See the `!fix` command.
|
||||
- New `--hires` option on `invoke>` line allows <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/CLI.md#this-is-an-example-of-txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
|
||||
- New `--perlin` and `--threshold` options allow you to add and control variation
|
||||
during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>)
|
||||
- Extensive metadata now written into PNG files, allowing reliable regeneration of images
|
||||
and tweaking of previous settings.
|
||||
- Command-line completion in `invoke.py` now works on Windows, Linux and Mac platforms.
|
||||
- Improved <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/CLI.md">command-line completion behavior</a>.
|
||||
New commands added:
|
||||
* List command-line history with `!history`
|
||||
* Search command-line history with `!search`
|
||||
* Clear history with `!clear`
|
||||
- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
|
||||
configure. To switch away from auto use the new flag like `--precision=float32`.
|
||||
|
||||
### v1.14 <small>(11 September 2022)</small>
|
||||
|
@ -1,4 +1,10 @@
|
||||
# Before you begin
|
||||
---
|
||||
title: Docker
|
||||
---
|
||||
|
||||
# :fontawesome-brands-docker: Docker
|
||||
|
||||
## Before you begin
|
||||
|
||||
- For end users: Install Stable Diffusion locally using the instructions for
|
||||
your OS.
|
||||
@ -6,7 +12,7 @@
|
||||
deployment to other environments (on-premises or cloud), follow these
|
||||
instructions. For general use, install locally to leverage your machine's GPU.
|
||||
|
||||
# Why containers?
|
||||
## Why containers?
|
||||
|
||||
They provide a flexible, reliable way to build and deploy Stable Diffusion.
|
||||
You'll also use a Docker volume to store the largest model files and image
|
||||
@ -26,11 +32,11 @@ development purposes it's fine. Once you're done with development tasks on your
|
||||
laptop you can build for the target platform and architecture and deploy to
|
||||
another environment with NVIDIA GPUs on-premises or in the cloud.
|
||||
|
||||
# Installation on a Linux container
|
||||
## Installation on a Linux container
|
||||
|
||||
## Prerequisites
|
||||
### Prerequisites
|
||||
|
||||
### Get the data files
|
||||
#### Get the data files
|
||||
|
||||
Go to
|
||||
[Hugging Face](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original),
|
||||
@ -44,14 +50,14 @@ cd ~/Downloads
|
||||
wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth
|
||||
```
|
||||
|
||||
### Install [Docker](https://github.com/santisbon/guides#docker)
|
||||
#### Install [Docker](https://github.com/santisbon/guides#docker)
|
||||
|
||||
On the Docker Desktop app, go to Preferences, Resources, Advanced. Increase the
|
||||
CPUs and Memory to avoid this
|
||||
[Issue](https://github.com/invoke-ai/InvokeAI/issues/342). You may need to
|
||||
increase Swap and Disk image size too.
|
||||
|
||||
## Setup
|
||||
### Setup
|
||||
|
||||
Set the fork you want to use and other variables.
|
||||
|
||||
@ -132,9 +138,9 @@ docker run -it \
|
||||
$TAG_STABLE_DIFFUSION
|
||||
```
|
||||
|
||||
# Usage (time to have fun)
|
||||
## Usage (time to have fun)
|
||||
|
||||
## Startup
|
||||
### Startup
|
||||
|
||||
If you're on a **Linux container** the `invoke` script is **automatically
|
||||
started** and the output dir set to the Docker volume you created earlier.
|
||||
@ -158,7 +164,7 @@ invoke> -h
|
||||
invoke> q
|
||||
```
|
||||
|
||||
## Text to Image
|
||||
### Text to Image
|
||||
|
||||
For quick (but bad) image results test with 5 steps (default 50) and 1 sample
|
||||
image. This will let you know that everything is set up correctly.
|
||||
@ -188,7 +194,7 @@ volume):
|
||||
docker cp dummy:/data/000001.928403745.png /Users/<your-user>/Pictures
|
||||
```
|
||||
|
||||
## Image to Image
|
||||
### Image to Image
|
||||
|
||||
You can also do text-guided image-to-image translation. For example, turning a
|
||||
sketch into a detailed drawing.
|
||||
@ -225,7 +231,7 @@ If you're on a Linux container on your Mac
|
||||
invoke> "A fantasy landscape, trending on artstation" -I /data/sketch-mountains-input.jpg --strength 0.75 --steps 50 -n1
|
||||
```
|
||||
|
||||
## Web Interface
|
||||
### Web Interface
|
||||
|
||||
You can use the `invoke` script with a graphical web interface. Start the web
|
||||
server with:
|
||||
@ -238,7 +244,7 @@ If it's running on your Mac point your Mac web browser to http://127.0.0.1:9090
|
||||
|
||||
Press Control-C at the command line to stop the web server.
|
||||
|
||||
## Notes
|
||||
### Notes
|
||||
|
||||
Some text you can add at the end of the prompt to make it very pretty:
|
||||
|
||||
|
@ -26,38 +26,36 @@ title: Linux
|
||||
|
||||
3. Copy the InvokeAI source code from GitHub:
|
||||
|
||||
```
|
||||
(base) ~$ git clone https://github.com/invoke-ai/InvokeAI.git
|
||||
```
|
||||
```bash
|
||||
(base) ~$ git clone https://github.com/invoke-ai/InvokeAI.git
|
||||
```
|
||||
|
||||
This will create an InvokeAI folder where you will follow the rest of the steps.
|
||||
This will create an InvokeAI folder where you will follow the rest of the steps.
|
||||
|
||||
4. Enter the newly-created InvokeAI folder. From this step forward make sure that you are working in the InvokeAI directory!
|
||||
|
||||
```
|
||||
(base) ~$ cd InvokeAI
|
||||
(base) ~/InvokeAI$
|
||||
```
|
||||
```bash
|
||||
(base) ~$ cd InvokeAI
|
||||
(base) ~/InvokeAI$
|
||||
```
|
||||
|
||||
5. Use anaconda to copy necessary python packages, create a new python
|
||||
environment named `ldm` and activate the environment.
|
||||
environment named `invokeai` and activate the environment.
|
||||
|
||||
```bash
|
||||
(base) ~/InvokeAI$ conda env create
|
||||
(base) ~/InvokeAI$ conda activate invokeai
|
||||
(invokeai) ~/InvokeAI$
|
||||
```
|
||||
|
||||
```
|
||||
(base) ~/InvokeAI$ conda env create
|
||||
(base) ~/InvokeAI$ conda activate ldm
|
||||
(ldm) ~/InvokeAI$
|
||||
```
|
||||
|
||||
After these steps, your command prompt will be prefixed by `(ldm)` as shown
|
||||
After these steps, your command prompt will be prefixed by `(invokeai)` as shown
|
||||
above.
|
||||
|
||||
6. Load a couple of small machine-learning models required by stable diffusion:
|
||||
|
||||
|
||||
```
|
||||
(ldm) ~/InvokeAI$ python3 scripts/preload_models.py
|
||||
```
|
||||
```bash
|
||||
(invokeai) ~/InvokeAI$ python3 scripts/preload_models.py
|
||||
```
|
||||
|
||||
!!! note
|
||||
|
||||
@ -69,7 +67,7 @@ This will create InvokeAI folder where you will follow the rest of the steps.
|
||||
|
||||
- For running with the released weights, you will first need to set up an account
|
||||
with [Hugging Face](https://huggingface.co).
|
||||
- Use your credentials to log in, and then point your browser [here](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original.)
|
||||
- Use your credentials to log in, and then point your browser [here](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original).
|
||||
- You may be asked to sign a license agreement at this point.
|
||||
- Click on "Files and versions" near the top of the page, and then click on the
|
||||
file named "sd-v1-4.ckpt". You'll be taken to a page that prompts you to click
|
||||
@ -79,34 +77,33 @@ This will create InvokeAI folder where you will follow the rest of the steps.
|
||||
This will create a symbolic link from the stable-diffusion model.ckpt file, to
|
||||
the true location of the `sd-v1-4.ckpt` file.
|
||||
|
||||
|
||||
```
|
||||
(ldm) ~/InvokeAI$ mkdir -p models/ldm/stable-diffusion-v1
|
||||
(ldm) ~/InvokeAI$ ln -sf /path/to/sd-v1-4.ckpt models/ldm/stable-diffusion-v1/model.ckpt
|
||||
```
|
||||
```bash
|
||||
(invokeai) ~/InvokeAI$ mkdir -p models/ldm/stable-diffusion-v1
|
||||
(invokeai) ~/InvokeAI$ ln -sf /path/to/sd-v1-4.ckpt models/ldm/stable-diffusion-v1/model.ckpt
|
||||
```
|
||||
|
||||
8. Start generating images!
|
||||
|
||||
```
|
||||
# for the pre-release weights use the -l or --laion400m switch
|
||||
(ldm) ~/InvokeAI$ python3 scripts/invoke.py -l
|
||||
```bash
|
||||
# for the pre-release weights use the -l or --laion400m switch
|
||||
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py -l
|
||||
|
||||
# for the post-release weights do not use the switch
|
||||
(ldm) ~/InvokeAI$ python3 scripts/invoke.py
|
||||
# for the post-release weights do not use the switch
|
||||
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py
|
||||
|
||||
# for additional configuration switches and arguments, use -h or --help
|
||||
(ldm) ~/InvokeAI$ python3 scripts/invoke.py -h
|
||||
```
|
||||
# for additional configuration switches and arguments, use -h or --help
|
||||
(invokeai) ~/InvokeAI$ python3 scripts/invoke.py -h
|
||||
```
|
||||
|
||||
9. Subsequently, to relaunch the script, be sure to run "conda activate ldm" (step 5, second command), enter the `InvokeAI` directory, and then launch the invoke script (step 8). If you forget to activate the ldm environment, the script will fail with multiple `ModuleNotFound` errors.
|
||||
9. Subsequently, to relaunch the script, be sure to run "conda activate invokeai" (step 5, second command), enter the `InvokeAI` directory, and then launch the invoke script (step 8). If you forget to activate the 'invokeai' environment, the script will fail with multiple `ModuleNotFound` errors.
|
||||
|
||||
## Updating to newer versions of the script
|
||||
|
||||
|
||||
This distribution is changing rapidly. If you used the `git clone` method (step 5) to download the InvokeAI directory, then to update to the latest and greatest version, launch the Anaconda window, enter `InvokeAI` and type:
|
||||
|
||||
```
|
||||
(ldm) ~/InvokeAI$ git pull
|
||||
```bash
|
||||
(invokeai) ~/InvokeAI$ git pull
|
||||
(invokeai) ~/InvokeAI$ conda env update -f environment.yml
|
||||
```
|
||||
|
||||
This will bring your local copy into sync with the remote one.
|
||||
|
@ -2,6 +2,8 @@
|
||||
title: macOS
|
||||
---
|
||||
|
||||
# :fontawesome-brands-apple: macOS
|
||||
|
||||
Invoke AI runs quite well on M1 Macs and we have a number of M1 users
|
||||
in the community.
|
||||
|
||||
@ -24,101 +26,130 @@ First you need to download a large checkpoint file.
|
||||
3. Accept the terms and click Access Repository
|
||||
4. Download [sd-v1-4.ckpt (4.27 GB)](https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/blob/main/sd-v1-4.ckpt) and note where you have saved it (probably the Downloads folder). You may want to move it somewhere else for longer term storage - SD needs this file to run.
|
||||
|
||||
While that is downloading, open Terminal and run the following
|
||||
commands one at a time, reading the comments and taking care to run
|
||||
the appropriate command for your Mac's architecture (Intel or M1).
|
||||
While that is downloading, open Terminal and run the following commands one at a time, reading the comments and taking care to run the appropriate command for your Mac's architecture (Intel or M1).
|
||||
|
||||
Do not just copy and paste the whole thing into your terminal!
|
||||
!!! todo "Homebrew"
|
||||
|
||||
```bash
|
||||
# Install brew (and Xcode command line tools):
|
||||
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
|
||||
If you have no brew installation yet (otherwise skip):
|
||||
|
||||
# Now there are two options to get the Python (miniconda) environment up and running:
|
||||
# 1. Alongside pyenv
|
||||
# 2. Standalone
|
||||
#
|
||||
# If you don't know what we are talking about, choose 2.
|
||||
#
|
||||
# If you are familiar with python environments, you'll know there are other options
|
||||
# for setting up the environment - you are on your own if you go one of those routes.
|
||||
##### BEGIN TWO DIFFERENT OPTIONS #####
|
||||
```bash title="install brew (and Xcode command line tools)"
|
||||
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
|
||||
```
|
||||
|
||||
### BEGIN OPTION 1: Installing alongside pyenv ###
|
||||
brew install pyenv-virtualenv # you might have this from before, no problem
|
||||
pyenv install anaconda3-2022.05
|
||||
pyenv virtualenv anaconda3-2022.05
|
||||
eval "$(pyenv init -)"
|
||||
pyenv activate anaconda3-2022.05
|
||||
### END OPTION 1 ###
|
||||
!!! todo "Conda Installation"
|
||||
|
||||
Now there are two different ways to set up the Python (miniconda) environment:
|
||||
|
||||
### BEGIN OPTION 2: Installing standalone ###
|
||||
# Install cmake, protobuf, and rust:
|
||||
brew install cmake protobuf rust
|
||||
1. Standalone
|
||||
2. with pyenv
|
||||
|
||||
# BEGIN ARCHITECTURE-DEPENDENT STEP #
|
||||
# For M1: install miniconda (M1 arm64 version):
|
||||
curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh -o Miniconda3-latest-MacOSX-arm64.sh
|
||||
/bin/bash Miniconda3-latest-MacOSX-arm64.sh
|
||||
If you don't know what we are talking about, choose Standalone. If you are familiar with python environments, choose "with pyenv"
|
||||
|
||||
# For Intel: install miniconda (Intel x86-64 version):
|
||||
curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -o Miniconda3-latest-MacOSX-x86_64.sh
|
||||
/bin/bash Miniconda3-latest-MacOSX-x86_64.sh
|
||||
# END ARCHITECTURE-DEPENDENT STEP #
|
||||
=== "Standalone"
|
||||
|
||||
### END OPTION 2 ###
|
||||
```bash title="Install cmake, protobuf, and rust"
|
||||
brew install cmake protobuf rust
|
||||
```
|
||||
|
||||
##### END TWO DIFFERENT OPTIONS #####
|
||||
Then clone the InvokeAI repository:
|
||||
|
||||
```bash title="Clone the InvokeAI repository:
|
||||
# Clone the Invoke AI repo
|
||||
git clone https://github.com/invoke-ai/InvokeAI.git
|
||||
cd InvokeAI
|
||||
```
|
||||
|
||||
Choose the appropriate architecture for your system and install miniconda:
|
||||
|
||||
# Clone the Invoke AI repo
|
||||
git clone https://github.com/invoke-ai/InvokeAI.git
|
||||
cd InvokeAI
|
||||
=== "M1 arm64"
|
||||
|
||||
### WAIT FOR THE CHECKPOINT FILE TO DOWNLOAD, THEN PROCEED ###
|
||||
# We will leave the big checkpoint wherever you stashed it for long-term storage,
|
||||
# and make a link to it from the repo's folder. This allows you to use it for
|
||||
# other repos, and if you need to delete Invoke AI, you won't have to download it again.
|
||||
```bash title="Install miniconda for M1 arm64"
|
||||
curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh \
|
||||
-o Miniconda3-latest-MacOSX-arm64.sh
|
||||
/bin/bash Miniconda3-latest-MacOSX-arm64.sh
|
||||
```
|
||||
|
||||
# Make the directory in the repo for the symlink
|
||||
mkdir -p models/ldm/stable-diffusion-v1/
|
||||
=== "Intel x86_64"
|
||||
|
||||
# This is the folder where you put the checkpoint file `sd-v1-4.ckpt`
|
||||
PATH_TO_CKPT="$HOME/Downloads"
|
||||
```bash title="Install miniconda for Intel"
|
||||
curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh \
|
||||
-o Miniconda3-latest-MacOSX-x86_64.sh
|
||||
/bin/bash Miniconda3-latest-MacOSX-x86_64.sh
|
||||
```
|
||||
|
||||
# Create a link to the checkpoint
|
||||
ln -s "$PATH_TO_CKPT/sd-v1-4.ckpt" models/ldm/stable-diffusion-v1/model.ckpt
|
||||
=== "with pyenv"
|
||||
|
||||
# BEGIN ARCHITECTURE-DEPENDENT STEP #
|
||||
# For M1: Create the environment & install packages
|
||||
PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac.yml
|
||||
```bash
|
||||
brew install pyenv-virtualenv
|
||||
pyenv install anaconda3-2022.05
|
||||
pyenv virtualenv anaconda3-2022.05
|
||||
eval "$(pyenv init -)"
|
||||
pyenv activate anaconda3-2022.05
|
||||
```
|
||||
|
||||
# For Intel: Create the environment & install packages
|
||||
PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-64 conda env create -f environment-mac.yml
|
||||
# END ARCHITECTURE-DEPENDENT STEP #
|
||||
!!! todo "Clone the Invoke AI repo"
|
||||
|
||||
# Activate the environment (you need to do this every time you want to run SD)
|
||||
conda activate invokeai
|
||||
```bash
|
||||
git clone https://github.com/invoke-ai/InvokeAI.git
|
||||
cd InvokeAI
|
||||
```
|
||||
|
||||
# This will download some bits and pieces and may take a while
|
||||
python scripts/preload_models.py
|
||||
!!! todo "Wait until the checkpoint-file download finished, then proceed"
|
||||
|
||||
# Run SD!
|
||||
python scripts/dream.py
|
||||
```
|
||||
# or run the web interface!
|
||||
python scripts/invoke.py --web
|
||||
We will leave the big checkpoint wherever you stashed it for long-term storage,
|
||||
and make a link to it from the repo's folder. This allows you to use it for
|
||||
other repos, or if you need to delete Invoke AI, you won't have to download it again.
|
||||
|
||||
# The original scripts should work as well.
|
||||
python scripts/orig_scripts/txt2img.py \
|
||||
--prompt "a photograph of an astronaut riding a horse" \
|
||||
--plms
|
||||
```
|
||||
```{.bash .annotate}
|
||||
# Make the directory in the repo for the symlink
|
||||
mkdir -p models/ldm/stable-diffusion-v1/
|
||||
|
||||
Note, `export PIP_EXISTS_ACTION=w` is a precaution to fix `conda env
|
||||
create -f environment-mac.yml` never finishing in some situations. So
|
||||
it isn't required but won't hurt.
|
||||
# This is the folder where you put the checkpoint file `sd-v1-4.ckpt`
|
||||
PATH_TO_CKPT="$HOME/Downloads" # (1)!
|
||||
|
||||
# Create a link to the checkpoint
|
||||
ln -s "$PATH_TO_CKPT/sd-v1-4.ckpt" models/ldm/stable-diffusion-v1/model.ckpt
|
||||
```
|
||||
|
||||
1. Replace `$HOME/Downloads` with the location where you actually stored the checkpoint (`sd-v1-4.ckpt`).
|
||||
|
||||
!!! todo "Create the environment & install packages"
|
||||
|
||||
=== "M1 Mac"
|
||||
|
||||
```bash
|
||||
PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-arm64 conda env create -f environment-mac.yml
|
||||
```
|
||||
|
||||
=== "Intel x86_64 Mac"
|
||||
|
||||
```bash
|
||||
PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-64 conda env create -f environment-mac.yml
|
||||
```
|
||||
|
||||
```bash
|
||||
# Activate the environment (you need to do this every time you want to run SD)
|
||||
conda activate invokeai
|
||||
|
||||
# This will download some bits and pieces and may take a while
|
||||
(invokeai) python scripts/preload_models.py
|
||||
|
||||
# Run SD!
|
||||
(invokeai) python scripts/dream.py
|
||||
|
||||
# or run the web interface!
|
||||
(invokeai) python scripts/invoke.py --web
|
||||
|
||||
# The original scripts should work as well.
|
||||
(invokeai) python scripts/orig_scripts/txt2img.py \
|
||||
--prompt "a photograph of an astronaut riding a horse" \
|
||||
--plms
|
||||
```
|
||||
!!! info
|
||||
|
||||
`export PIP_EXISTS_ACTION=w` is a precaution to fix `conda env
|
||||
create -f environment-mac.yml` never finishing in some situations. So
|
||||
it isn't required but won't hurt.
|
||||
---
|
||||
|
||||
## Common problems
|
||||
@ -158,7 +189,6 @@ conda install \
|
||||
-n invokeai
|
||||
```
|
||||
|
||||
|
||||
If it takes forever to run `conda env create -f environment-mac.yml`, try this:
|
||||
|
||||
```bash
|
||||
@ -170,12 +200,12 @@ conda clean \
|
||||
|
||||
Or you could try to completely reset Anaconda:
|
||||
|
||||
```bash
|
||||
conda update \
|
||||
--force-reinstall \
|
||||
-y \
|
||||
-n base \
|
||||
-c defaults conda
|
||||
```bash
|
||||
conda update \
|
||||
--force-reinstall \
|
||||
-y \
|
||||
-n base \
|
||||
-c defaults conda
|
||||
```
|
||||
|
||||
---
|
||||
@ -203,7 +233,7 @@ There are several causes of these errors:
|
||||
conda env create -f environment-mac.yml
|
||||
```
|
||||
|
||||
4. If you have activated the linvokeaidm virtual environment and tried rebuilding it,
|
||||
4. If you have activated the invokeai virtual environment and tried rebuilding it,
|
||||
maybe the problem could be that I have something installed that you don't and
|
||||
you'll just need to manually install it. Make sure you activate the virtual
|
||||
environment so it installs there instead of globally.
|
||||
|
@ -39,7 +39,7 @@ in the wiki
|
||||
|
||||
4. Run the command:
|
||||
|
||||
```bash
|
||||
```batch
|
||||
git clone https://github.com/invoke-ai/InvokeAI.git
|
||||
```
|
||||
|
||||
@ -48,17 +48,21 @@ in the wiki
|
||||
|
||||
5. Enter the newly-created InvokeAI folder. From this step forward make sure that you are working in the InvokeAI directory!
|
||||
|
||||
```
|
||||
cd InvokeAI
|
||||
```
|
||||
```batch
|
||||
cd InvokeAI
|
||||
```
|
||||
|
||||
6. Run the following two commands:
|
||||
|
||||
```
|
||||
conda env create (step 6a)
|
||||
conda activate ldm (step 6b)
|
||||
```
|
||||
This will install all python requirements and activate the "ldm" environment
|
||||
```batch title="step 6a"
|
||||
conda env create
|
||||
```
|
||||
|
||||
```batch title="step 6b"
|
||||
conda activate invokeai
|
||||
```
|
||||
|
||||
This will install all python requirements and activate the "invokeai" environment
|
||||
which sets PATH and other environment variables properly.
|
||||
|
||||
Note that the long form of the first command is `conda env create -f environment.yml`. If the
|
||||
@ -67,7 +71,7 @@ conda activate ldm (step 6b)
|
||||
|
||||
7. Run the command:
|
||||
|
||||
```bash
|
||||
```batch
|
||||
python scripts\preload_models.py
|
||||
```
|
||||
|
||||
@ -79,45 +83,44 @@ conda activate ldm (step 6b)
|
||||
|
||||
8. Now you need to install the weights for the big stable diffusion model.
|
||||
|
||||
- For running with the released weights, you will first need to set up an account with Hugging Face (https://huggingface.co).
|
||||
- Use your credentials to log in, and then point your browser at https://huggingface.co/CompVis/stable-diffusion-v-1-4-original.
|
||||
- You may be asked to sign a license agreement at this point.
|
||||
- Click on "Files and versions" near the top of the page, and then click on the file named `sd-v1-4.ckpt`. You'll be taken to a page that
|
||||
prompts you to click the "download" link. Now save the file somewhere safe on your local machine.
|
||||
- The weight file is >4 GB in size, so
|
||||
downloading may take a while.
|
||||
1. For running with the released weights, you will first need to set up an account with Hugging Face (https://huggingface.co).
|
||||
2. Use your credentials to log in, and then point your browser at https://huggingface.co/CompVis/stable-diffusion-v-1-4-original.
|
||||
3. You may be asked to sign a license agreement at this point.
|
||||
4. Click on "Files and versions" near the top of the page, and then click on the file named `sd-v1-4.ckpt`. You'll be taken to a page that
|
||||
prompts you to click the "download" link. Now save the file somewhere safe on your local machine.
|
||||
5. The weight file is >4 GB in size, so
|
||||
downloading may take a while.
|
||||
|
||||
Now run the following commands from **within the InvokeAI directory** to copy the weights file to the right place:
|
||||
Now run the following commands from **within the InvokeAI directory** to copy the weights file to the right place:
|
||||
|
||||
```
|
||||
mkdir -p models\ldm\stable-diffusion-v1
|
||||
copy C:\path\to\sd-v1-4.ckpt models\ldm\stable-diffusion-v1\model.ckpt
|
||||
```
|
||||
```batch
|
||||
mkdir -p models\ldm\stable-diffusion-v1
|
||||
copy C:\path\to\sd-v1-4.ckpt models\ldm\stable-diffusion-v1\model.ckpt
|
||||
```
|
||||
|
||||
Please replace `C:\path\to\sd-v1-4.ckpt` with the correct path to wherever you stashed this file. If you prefer not to copy or move the .ckpt file,
|
||||
you may instead create a shortcut to it from within `models\ldm\stable-diffusion-v1\`.
|
||||
Please replace `C:\path\to\sd-v1-4.ckpt` with the correct path to wherever you stashed this file. If you prefer not to copy or move the .ckpt file,
|
||||
you may instead create a shortcut to it from within `models\ldm\stable-diffusion-v1\`.
|
||||
|
||||
9. Start generating images!
|
||||
|
||||
```bash
|
||||
# for the pre-release weights
|
||||
```batch title="for the pre-release weights"
|
||||
python scripts\invoke.py -l
|
||||
```
|
||||
|
||||
# for the post-release weights
|
||||
```batch title="for the post-release weights"
|
||||
python scripts\invoke.py
|
||||
```
|
||||
|
||||
10. Subsequently, to relaunch the script, first activate the Anaconda command window (step 3), enter the InvokeAI directory (step 5, `cd \path\to\InvokeAI`), run `conda activate ldm` (step 6b), and then launch the invoke script (step 9).
|
||||
10. Subsequently, to relaunch the script, first activate the Anaconda command window (step 3), enter the InvokeAI directory (step 5, `cd \path\to\InvokeAI`), run `conda activate invokeai` (step 6b), and then launch the invoke script (step 9).
|
||||
|
||||
!!! tip "Tildebyte has written an alternative"
|
||||
|
||||
**Note:** Tildebyte has written an alternative
|
||||
["Easy peasy Windows install"](https://github.com/invoke-ai/InvokeAI/wiki/Easy-peasy-Windows-install)
|
||||
which uses the Windows Powershell and pew. If you are having trouble with
|
||||
Anaconda on Windows, give this a try (or try it first!)
|
||||
|
||||
---
|
||||
|
||||
This distribution is changing rapidly. If you used the `git clone` method (step 5) to download the InvokeAI directory, then to update to the latest and greatest version, launch the Anaconda window, enter `InvokeAI`, and type:
|
||||
|
||||
This distribution is changing rapidly. If you used the `git clone` method
|
||||
(step 5) to download the stable-diffusion directory, then to update to the
|
||||
latest and greatest version, launch the Anaconda window, enter
|
||||
|
@ -1,4 +1,4 @@
|
||||
name: ldm
|
||||
name: invokeai
|
||||
channels:
|
||||
- pytorch
|
||||
- conda-forge
|
||||
@ -47,16 +47,17 @@ dependencies:
|
||||
- dependency_injector==4.40.0
|
||||
- eventlet==0.33.1
|
||||
- opencv-python==4.6.0
|
||||
- protobuf==3.19.5
|
||||
- protobuf==3.19.6
|
||||
- realesrgan==0.2.5.0
|
||||
- send2trash==1.8.0
|
||||
- test-tube==0.7.5
|
||||
- transformers==4.21.2
|
||||
- transformers==4.21.3
|
||||
- torch-fidelity==0.3.0
|
||||
- -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
|
||||
- -e git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||
- -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
||||
- -e git+https://github.com/TencentARC/GFPGAN.git#egg=gfpgan
|
||||
- -e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
|
||||
- -e .
|
||||
variables:
|
||||
PYTORCH_ENABLE_MPS_FALLBACK: 1
|
||||
|
@ -1,4 +1,4 @@
|
||||
name: ldm
|
||||
name: invokeai
|
||||
channels:
|
||||
- pytorch
|
||||
- defaults
|
||||
@ -15,7 +15,7 @@ dependencies:
|
||||
- pudb==2019.2
|
||||
- imageio==2.9.0
|
||||
- imageio-ffmpeg==0.4.2
|
||||
- pytorch-lightning==1.4.2
|
||||
- pytorch-lightning==1.7.7
|
||||
- omegaconf==2.1.1
|
||||
- realesrgan==0.2.5.0
|
||||
- test-tube>=0.7.5
|
||||
@ -25,8 +25,8 @@ dependencies:
|
||||
- einops==0.3.0
|
||||
- pyreadline3
|
||||
- torch-fidelity==0.3.0
|
||||
- transformers==4.19.2
|
||||
- torchmetrics==0.6.0
|
||||
- transformers==4.21.3
|
||||
- torchmetrics==0.7.0
|
||||
- flask==2.1.3
|
||||
- flask_socketio==5.3.0
|
||||
- flask_cors==3.0.10
|
||||
@ -37,4 +37,5 @@ dependencies:
|
||||
- -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
|
||||
- -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
|
||||
- -e git+https://github.com/TencentARC/GFPGAN.git#egg=gfpgan
|
||||
- -e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
|
||||
- -e .
|
||||
|
690
frontend/dist/assets/index.b06af007.js
vendored
Normal file
483
frontend/dist/assets/index.ea68b5f5.js
vendored
2
frontend/dist/index.html
vendored
@ -6,7 +6,7 @@
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>InvokeAI - A Stable Diffusion Toolkit</title>
|
||||
<link rel="shortcut icon" type="icon" href="/assets/favicon.0d253ced.ico" />
|
||||
<script type="module" crossorigin src="/assets/index.ea68b5f5.js"></script>
|
||||
<script type="module" crossorigin src="/assets/index.b06af007.js"></script>
|
||||
<link rel="stylesheet" href="/assets/index.58175ea1.css">
|
||||
</head>
|
||||
|
||||
|
@ -32,27 +32,8 @@ export const UPSCALING_LEVELS: Array<{ key: string; value: number }> = [
|
||||
{ key: '4x', value: 4 },
|
||||
];
|
||||
|
||||
// Internal to human-readable parameters
|
||||
export const PARAMETERS: { [key: string]: string } = {
|
||||
prompt: 'Prompt',
|
||||
iterations: 'Iterations',
|
||||
steps: 'Steps',
|
||||
cfgScale: 'CFG Scale',
|
||||
height: 'Height',
|
||||
width: 'Width',
|
||||
sampler: 'Sampler',
|
||||
seed: 'Seed',
|
||||
img2imgStrength: 'img2img Strength',
|
||||
gfpganStrength: 'GFPGAN Strength',
|
||||
upscalingLevel: 'Upscaling Level',
|
||||
upscalingStrength: 'Upscaling Strength',
|
||||
initialImagePath: 'Initial Image',
|
||||
maskPath: 'Initial Image Mask',
|
||||
shouldFitToWidthHeight: 'Fit Initial Image',
|
||||
seamless: 'Seamless Tiling',
|
||||
hiresFix: 'High Resolution Optimizations',
|
||||
};
|
||||
|
||||
export const NUMPY_RAND_MIN = 0;
|
||||
|
||||
export const NUMPY_RAND_MAX = 4294967295;
|
||||
|
||||
export const FACETOOL_TYPES = ['gfpgan', 'codeformer'] as const;
|
||||
|
@ -15,8 +15,8 @@ export enum Feature {
|
||||
IMAGE_TO_IMAGE,
|
||||
}
|
||||
/** For each tooltip in the UI, the below feature definitions & props will pull relevant information into the tooltip.
|
||||
*
|
||||
* To-do: href & GuideImages are placeholders, and are not currently utilized, but will be updated (along with the tooltip UI) as feature and UI development and we get a better idea on where things "forever homes" will be .
|
||||
*
|
||||
 * To-do: href & GuideImages are placeholders, and are not currently utilized, but will be updated (along with the tooltip UI) as feature and UI development continues and we get a better idea of where things' "forever homes" will be.
|
||||
*/
|
||||
export const FEATURES: Record<Feature, FeatureHelpInfo> = {
|
||||
[Feature.PROMPT]: {
|
||||
@ -30,7 +30,8 @@ export const FEATURES: Record<Feature, FeatureHelpInfo> = {
|
||||
guideImage: 'asset/path.gif',
|
||||
},
|
||||
[Feature.OTHER]: {
|
||||
text: 'These options will enable alternative processing modes for Invoke. Seamless tiling will work to generate repeating patterns in the output. High Resolution Optimization performs a two-step generation cycle, and should be used at higher resolutions when you desire a more coherent image/composition. ', href: 'link/to/docs/feature3.html',
|
||||
text: 'These options will enable alternative processing modes for Invoke. Seamless tiling will work to generate repeating patterns in the output. High Resolution Optimization performs a two-step generation cycle, and should be used at higher resolutions when you desire a more coherent image/composition. ',
|
||||
href: 'link/to/docs/feature3.html',
|
||||
guideImage: 'asset/path.gif',
|
||||
},
|
||||
[Feature.SEED]: {
|
||||
@ -49,7 +50,7 @@ export const FEATURES: Record<Feature, FeatureHelpInfo> = {
|
||||
guideImage: 'asset/path.gif',
|
||||
},
|
||||
[Feature.FACE_CORRECTION]: {
|
||||
text: 'Using GFPGAN, Face Correction will attempt to identify faces in outputs, and correct any defects/abnormalities. Higher values will apply a stronger corrective pressure on outputs, resulting in more appealing faces (with less respect for accuracy of the original subject).',
|
||||
text: 'Using GFPGAN or Codeformer, Face Correction will attempt to identify faces in outputs, and correct any defects/abnormalities. Higher strength values will apply a stronger corrective pressure on outputs, resulting in more appealing faces. With Codeformer, a higher fidelity will attempt to preserve the original image, at the expense of face correction strength.',
|
||||
href: 'link/to/docs/feature3.html',
|
||||
guideImage: 'asset/path.gif',
|
||||
},
|
||||
|
7
frontend/src/app/invokeai.d.ts
vendored
@ -89,15 +89,16 @@ export declare type ESRGANMetadata = CommonPostProcessedImageMetadata & {
|
||||
strength: number;
|
||||
};
|
||||
|
||||
export declare type GFPGANMetadata = CommonPostProcessedImageMetadata & {
|
||||
type: 'gfpgan';
|
||||
export declare type FacetoolMetadata = CommonPostProcessedImageMetadata & {
|
||||
type: 'gfpgan' | 'codeformer';
|
||||
strength: number;
|
||||
fidelity?: number;
|
||||
};
|
||||
|
||||
// Superset of all postprocessed image metadata types.
|
||||
export declare type PostProcessedImageMetadata =
|
||||
| ESRGANMetadata
|
||||
| GFPGANMetadata;
|
||||
| FacetoolMetadata;
|
||||
|
||||
// Metadata includes the system config and image metadata.
|
||||
export declare type Metadata = SystemConfig & {
|
||||
|
@ -10,7 +10,7 @@ import * as InvokeAI from '../invokeai';
|
||||
|
||||
export const generateImage = createAction<undefined>('socketio/generateImage');
|
||||
export const runESRGAN = createAction<InvokeAI.Image>('socketio/runESRGAN');
|
||||
export const runGFPGAN = createAction<InvokeAI.Image>('socketio/runGFPGAN');
|
||||
export const runFacetool = createAction<InvokeAI.Image>('socketio/runFacetool');
|
||||
export const deleteImage = createAction<InvokeAI.Image>('socketio/deleteImage');
|
||||
export const requestImages = createAction<undefined>(
|
||||
'socketio/requestImages'
|
||||
|
@ -30,14 +30,14 @@ const makeSocketIOEmitters = (
|
||||
options.shouldUseInitImage = false;
|
||||
}
|
||||
|
||||
const { generationParameters, esrganParameters, gfpganParameters } =
|
||||
const { generationParameters, esrganParameters, facetoolParameters } =
|
||||
frontendToBackendParameters(options, getState().system);
|
||||
|
||||
socketio.emit(
|
||||
'generateImage',
|
||||
generationParameters,
|
||||
esrganParameters,
|
||||
gfpganParameters
|
||||
facetoolParameters
|
||||
);
|
||||
|
||||
dispatch(
|
||||
@ -46,7 +46,7 @@ const makeSocketIOEmitters = (
|
||||
message: `Image generation requested: ${JSON.stringify({
|
||||
...generationParameters,
|
||||
...esrganParameters,
|
||||
...gfpganParameters,
|
||||
...facetoolParameters,
|
||||
})}`,
|
||||
})
|
||||
);
|
||||
@ -71,24 +71,32 @@ const makeSocketIOEmitters = (
|
||||
})
|
||||
);
|
||||
},
|
||||
emitRunGFPGAN: (imageToProcess: InvokeAI.Image) => {
|
||||
emitRunFacetool: (imageToProcess: InvokeAI.Image) => {
|
||||
dispatch(setIsProcessing(true));
|
||||
const { gfpganStrength } = getState().options;
|
||||
const { facetoolType, facetoolStrength, codeformerFidelity } =
|
||||
getState().options;
|
||||
|
||||
const gfpganParameters = {
|
||||
facetool_strength: gfpganStrength,
|
||||
const facetoolParameters: Record<string, any> = {
|
||||
facetool_strength: facetoolStrength,
|
||||
};
|
||||
|
||||
if (facetoolType === 'codeformer') {
|
||||
facetoolParameters.codeformer_fidelity = codeformerFidelity;
|
||||
}
|
||||
|
||||
socketio.emit('runPostprocessing', imageToProcess, {
|
||||
type: 'gfpgan',
|
||||
...gfpganParameters,
|
||||
type: facetoolType,
|
||||
...facetoolParameters,
|
||||
});
|
||||
dispatch(
|
||||
addLogEntry({
|
||||
timestamp: dateFormat(new Date(), 'isoDateTime'),
|
||||
message: `GFPGAN fix faces requested: ${JSON.stringify({
|
||||
file: imageToProcess.url,
|
||||
...gfpganParameters,
|
||||
})}`,
|
||||
message: `Face restoration (${facetoolType}) requested: ${JSON.stringify(
|
||||
{
|
||||
file: imageToProcess.url,
|
||||
...facetoolParameters,
|
||||
}
|
||||
)}`,
|
||||
})
|
||||
);
|
||||
},
|
||||
|
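For reference, the payload emitted over the 'runPostprocessing' event above has roughly the shape sketched below (field names come from the emitter code; the numeric values are just the slice defaults and are illustrative, written as Python dicts for brevity):

```python
# Illustrative payloads implied by emitRunFacetool above; not taken from a live session.
gfpgan_payload = {
    'type': 'gfpgan',
    'facetool_strength': 0.8,
}
codeformer_payload = {
    'type': 'codeformer',
    'facetool_strength': 0.8,
    'codeformer_fidelity': 0.75,  # sent only when the selected facetool is codeformer
}
```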
@ -151,32 +151,6 @@ const makeSocketIOListeners = (
|
||||
console.error(e);
|
||||
}
|
||||
},
|
||||
/**
|
||||
* Callback to run when we receive a 'gfpganResult' event.
|
||||
*/
|
||||
onGFPGANResult: (data: InvokeAI.ImageResultResponse) => {
|
||||
try {
|
||||
const { url, metadata, mtime } = data;
|
||||
|
||||
dispatch(
|
||||
addImage({
|
||||
uuid: uuidv4(),
|
||||
url,
|
||||
mtime,
|
||||
metadata,
|
||||
})
|
||||
);
|
||||
|
||||
dispatch(
|
||||
addLogEntry({
|
||||
timestamp: dateFormat(new Date(), 'isoDateTime'),
|
||||
message: `Fixed faces: ${url}`,
|
||||
})
|
||||
);
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
}
|
||||
},
|
||||
/**
|
||||
* Callback to run when we receive a 'progressUpdate' event.
|
||||
* TODO: Add additional progress phases
|
||||
|
@ -22,9 +22,9 @@ import * as InvokeAI from '../invokeai';
|
||||
* some new action to handle whatever data was sent from the server.
|
||||
*/
|
||||
export const socketioMiddleware = () => {
|
||||
const { hostname, port } = new URL(window.location.href);
|
||||
const { origin } = new URL(window.location.href);
|
||||
|
||||
const socketio = io(`http://${hostname}:${port}`, {
|
||||
const socketio = io(origin, {
|
||||
timeout: 60000,
|
||||
});
|
||||
|
||||
@ -50,7 +50,7 @@ export const socketioMiddleware = () => {
|
||||
const {
|
||||
emitGenerateImage,
|
||||
emitRunESRGAN,
|
||||
emitRunGFPGAN,
|
||||
emitRunFacetool,
|
||||
emitDeleteImage,
|
||||
emitRequestImages,
|
||||
emitRequestNewImages,
|
||||
@ -129,8 +129,8 @@ export const socketioMiddleware = () => {
|
||||
break;
|
||||
}
|
||||
|
||||
case 'socketio/runGFPGAN': {
|
||||
emitRunGFPGAN(action.payload);
|
||||
case 'socketio/runFacetool': {
|
||||
emitRunFacetool(action.payload);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -41,8 +41,10 @@ export const frontendToBackendParameters = (
|
||||
shouldRunESRGAN,
|
||||
upscalingLevel,
|
||||
upscalingStrength,
|
||||
shouldRunGFPGAN,
|
||||
gfpganStrength,
|
||||
shouldRunFacetool,
|
||||
facetoolStrength,
|
||||
codeformerFidelity,
|
||||
facetoolType,
|
||||
shouldRandomizeSeed,
|
||||
} = optionsState;
|
||||
|
||||
@ -88,7 +90,7 @@ export const frontendToBackendParameters = (
|
||||
}
|
||||
|
||||
let esrganParameters: false | { [k: string]: any } = false;
|
||||
let gfpganParameters: false | { [k: string]: any } = false;
|
||||
let facetoolParameters: false | { [k: string]: any } = false;
|
||||
|
||||
if (shouldRunESRGAN) {
|
||||
esrganParameters = {
|
||||
@ -97,99 +99,19 @@ export const frontendToBackendParameters = (
|
||||
};
|
||||
}
|
||||
|
||||
if (shouldRunGFPGAN) {
|
||||
gfpganParameters = {
|
||||
strength: gfpganStrength,
|
||||
if (shouldRunFacetool) {
|
||||
facetoolParameters = {
|
||||
type: facetoolType,
|
||||
strength: facetoolStrength,
|
||||
};
|
||||
if (facetoolType === 'codeformer') {
|
||||
facetoolParameters.codeformer_fidelity = codeformerFidelity
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
generationParameters,
|
||||
esrganParameters,
|
||||
gfpganParameters,
|
||||
facetoolParameters,
|
||||
};
|
||||
};
|
||||
|
||||
export const backendToFrontendParameters = (parameters: {
|
||||
[key: string]: any;
|
||||
}) => {
|
||||
const {
|
||||
prompt,
|
||||
iterations,
|
||||
steps,
|
||||
cfg_scale,
|
||||
threshold,
|
||||
perlin,
|
||||
height,
|
||||
width,
|
||||
sampler_name,
|
||||
seed,
|
||||
seamless,
|
||||
hires_fix,
|
||||
progress_images,
|
||||
variation_amount,
|
||||
with_variations,
|
||||
facetool_strength,
|
||||
upscale,
|
||||
init_img,
|
||||
init_mask,
|
||||
strength,
|
||||
} = parameters;
|
||||
|
||||
const options: { [key: string]: any } = {
|
||||
shouldDisplayInProgress: progress_images,
|
||||
// init
|
||||
shouldGenerateVariations: false,
|
||||
shouldRunESRGAN: false,
|
||||
shouldRunGFPGAN: false,
|
||||
initialImagePath: '',
|
||||
maskPath: '',
|
||||
};
|
||||
|
||||
if (variation_amount > 0) {
|
||||
options.shouldGenerateVariations = true;
|
||||
options.variationAmount = variation_amount;
|
||||
if (with_variations) {
|
||||
options.seedWeights = seedWeightsToString(with_variations);
|
||||
}
|
||||
}
|
||||
|
||||
if (facetool_strength > 0) {
|
||||
options.shouldRunGFPGAN = true;
|
||||
options.gfpganStrength = facetool_strength;
|
||||
}
|
||||
|
||||
if (upscale) {
|
||||
options.shouldRunESRGAN = true;
|
||||
options.upscalingLevel = upscale[0];
|
||||
options.upscalingStrength = upscale[1];
|
||||
}
|
||||
|
||||
if (init_img) {
|
||||
options.shouldUseInitImage = true;
|
||||
options.initialImagePath = init_img;
|
||||
options.strength = strength;
|
||||
if (init_mask) {
|
||||
options.maskPath = init_mask;
|
||||
}
|
||||
}
|
||||
|
||||
// if we had a prompt, add all the metadata, but if we don't have a prompt,
|
||||
// we must have only done ESRGAN or GFPGAN so do not add that metadata
|
||||
if (prompt) {
|
||||
options.prompt = prompt;
|
||||
options.iterations = iterations;
|
||||
options.steps = steps;
|
||||
options.cfgScale = cfg_scale;
|
||||
options.threshold = threshold;
|
||||
options.perlin = perlin;
|
||||
options.height = height;
|
||||
options.width = width;
|
||||
options.sampler = sampler_name;
|
||||
options.seed = seed;
|
||||
options.seamless = seamless;
|
||||
options.hiresFix = hires_fix;
|
||||
}
|
||||
|
||||
return options;
|
||||
};
|
||||
|
@ -15,7 +15,7 @@ import {
|
||||
import DeleteImageModal from './DeleteImageModal';
|
||||
import { SystemState } from '../system/systemSlice';
|
||||
import IAIButton from '../../common/components/IAIButton';
|
||||
import { runESRGAN, runGFPGAN } from '../../app/socketio/actions';
|
||||
import { runESRGAN, runFacetool } from '../../app/socketio/actions';
|
||||
import IAIIconButton from '../../common/components/IAIIconButton';
|
||||
import { MdDelete, MdFace, MdHd, MdImage, MdInfo } from 'react-icons/md';
|
||||
import InvokePopover from './InvokePopover';
|
||||
@ -66,8 +66,8 @@ const CurrentImageButtons = ({ image }: CurrentImageButtonsProps) => {
|
||||
(state: RootState) => state.options.upscalingLevel
|
||||
);
|
||||
|
||||
const gfpganStrength = useAppSelector(
|
||||
(state: RootState) => state.options.gfpganStrength
|
||||
const facetoolStrength = useAppSelector(
|
||||
(state: RootState) => state.options.facetoolStrength
|
||||
);
|
||||
|
||||
const { isProcessing, isConnected, isGFPGANAvailable, isESRGANAvailable } =
|
||||
@ -186,7 +186,8 @@ const CurrentImageButtons = ({ image }: CurrentImageButtonsProps) => {
|
||||
]
|
||||
);
|
||||
|
||||
const handleClickFixFaces = () => dispatch(runGFPGAN(image));
|
||||
const handleClickFixFaces = () => dispatch(runFacetool(image));
|
||||
|
||||
useHotkeys(
|
||||
'r',
|
||||
() => {
|
||||
@ -195,7 +196,7 @@ const CurrentImageButtons = ({ image }: CurrentImageButtonsProps) => {
|
||||
Boolean(!intermediateImage) &&
|
||||
isConnected &&
|
||||
!isProcessing &&
|
||||
gfpganStrength
|
||||
facetoolStrength
|
||||
) {
|
||||
handleClickFixFaces();
|
||||
} else {
|
||||
@ -213,7 +214,7 @@ const CurrentImageButtons = ({ image }: CurrentImageButtonsProps) => {
|
||||
intermediateImage,
|
||||
isConnected,
|
||||
isProcessing,
|
||||
gfpganStrength,
|
||||
facetoolStrength,
|
||||
]
|
||||
);
|
||||
|
||||
@ -270,7 +271,7 @@ const CurrentImageButtons = ({ image }: CurrentImageButtonsProps) => {
|
||||
!isGFPGANAvailable ||
|
||||
Boolean(intermediateImage) ||
|
||||
!(isConnected && !isProcessing) ||
|
||||
!gfpganStrength
|
||||
!facetoolStrength
|
||||
}
|
||||
onClick={handleClickFixFaces}
|
||||
/>
|
||||
|
@ -14,7 +14,9 @@ import { useAppDispatch } from '../../../app/store';
|
||||
import * as InvokeAI from '../../../app/invokeai';
|
||||
import {
|
||||
setCfgScale,
|
||||
setGfpganStrength,
|
||||
setFacetoolStrength,
|
||||
setCodeformerFidelity,
|
||||
setFacetoolType,
|
||||
setHeight,
|
||||
setHiresFix,
|
||||
setImg2imgStrength,
|
||||
@ -151,7 +153,7 @@ const ImageMetadataViewer = memo(
|
||||
<MetadataItem
|
||||
label="Fix faces strength"
|
||||
value={strength}
|
||||
onClick={() => dispatch(setGfpganStrength(strength))}
|
||||
onClick={() => dispatch(setFacetoolStrength(strength))}
|
||||
/>
|
||||
)}
|
||||
{type === 'esrgan' && scale !== undefined && (
|
||||
@ -321,12 +323,46 @@ const ImageMetadataViewer = memo(
|
||||
<MetadataItem
|
||||
label="Strength"
|
||||
value={strength}
|
||||
onClick={() =>
|
||||
dispatch(setGfpganStrength(strength))
|
||||
}
|
||||
onClick={() => {
|
||||
dispatch(setFacetoolStrength(strength));
|
||||
dispatch(setFacetoolType('gfpgan'));
|
||||
}}
|
||||
/>
|
||||
</Flex>
|
||||
);
|
||||
} else if (postprocess.type === 'codeformer') {
|
||||
const { strength, fidelity } = postprocess;
|
||||
return (
|
||||
<Flex
|
||||
key={i}
|
||||
pl={'2rem'}
|
||||
gap={1}
|
||||
direction={'column'}
|
||||
>
|
||||
<Text size={'md'}>{`${
|
||||
i + 1
|
||||
}: Face restoration (Codeformer)`}</Text>
|
||||
|
||||
<MetadataItem
|
||||
label="Strength"
|
||||
value={strength}
|
||||
onClick={() => {
|
||||
dispatch(setFacetoolStrength(strength));
|
||||
dispatch(setFacetoolType('codeformer'));
|
||||
}}
|
||||
/>
|
||||
{fidelity && (
|
||||
<MetadataItem
|
||||
label="Fidelity"
|
||||
value={fidelity}
|
||||
onClick={() => {
|
||||
dispatch(setCodeformerFidelity(fidelity));
|
||||
dispatch(setFacetoolType('codeformer'));
|
||||
}}
|
||||
/>
|
||||
)}
|
||||
</Flex>
|
||||
);
|
||||
}
|
||||
}
|
||||
)}
|
||||
|
@ -72,7 +72,13 @@ export const gallerySlice = createSlice({
|
||||
},
|
||||
addImage: (state, action: PayloadAction<InvokeAI.Image>) => {
|
||||
const newImage = action.payload;
|
||||
const { uuid, mtime } = newImage;
|
||||
const { uuid, url, mtime } = newImage;
|
||||
|
||||
// Do not add duplicate images
|
||||
if (state.images.find((i) => i.url === url && i.mtime === mtime)) {
|
||||
return;
|
||||
}
|
||||
|
||||
state.images.unshift(newImage);
|
||||
state.currentImageUuid = uuid;
|
||||
state.intermediateImage = undefined;
|
||||
@ -120,8 +126,15 @@ export const gallerySlice = createSlice({
|
||||
) => {
|
||||
const { images, areMoreImagesAvailable } = action.payload;
|
||||
if (images.length > 0) {
|
||||
// Filter images that already exist in the gallery
|
||||
const newImages = images.filter(
|
||||
(newImage) =>
|
||||
!state.images.find(
|
||||
(i) => i.url === newImage.url && i.mtime === newImage.mtime
|
||||
)
|
||||
);
|
||||
state.images = state.images
|
||||
.concat(images)
|
||||
.concat(newImages)
|
||||
.sort((a, b) => b.mtime - a.mtime);
|
||||
|
||||
if (!state.currentImage) {
|
||||
|
@ -6,21 +6,21 @@ import {
|
||||
useAppSelector,
|
||||
} from '../../../../app/store';
|
||||
import IAISwitch from '../../../../common/components/IAISwitch';
|
||||
import { setShouldRunGFPGAN } from '../../optionsSlice';
|
||||
import { setShouldRunFacetool } from '../../optionsSlice';
|
||||
|
||||
export default function FaceRestore() {
|
||||
const isGFPGANAvailable = useAppSelector(
|
||||
(state: RootState) => state.system.isGFPGANAvailable
|
||||
);
|
||||
|
||||
const shouldRunGFPGAN = useAppSelector(
|
||||
(state: RootState) => state.options.shouldRunGFPGAN
|
||||
const shouldRunFacetool = useAppSelector(
|
||||
(state: RootState) => state.options.shouldRunFacetool
|
||||
);
|
||||
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
const handleChangeShouldRunGFPGAN = (e: ChangeEvent<HTMLInputElement>) =>
|
||||
dispatch(setShouldRunGFPGAN(e.target.checked));
|
||||
const handleChangeShouldRunFacetool = (e: ChangeEvent<HTMLInputElement>) =>
|
||||
dispatch(setShouldRunFacetool(e.target.checked));
|
||||
|
||||
return (
|
||||
<Flex
|
||||
@ -32,8 +32,8 @@ export default function FaceRestore() {
|
||||
<p>Restore Face</p>
|
||||
<IAISwitch
|
||||
isDisabled={!isGFPGANAvailable}
|
||||
isChecked={shouldRunGFPGAN}
|
||||
onChange={handleChangeShouldRunGFPGAN}
|
||||
isChecked={shouldRunFacetool}
|
||||
onChange={handleChangeShouldRunFacetool}
|
||||
/>
|
||||
</Flex>
|
||||
);
|
||||
|
@ -3,18 +3,29 @@ import { Flex } from '@chakra-ui/react';
|
||||
import { RootState } from '../../../../app/store';
|
||||
import { useAppDispatch, useAppSelector } from '../../../../app/store';
|
||||
|
||||
import { OptionsState, setGfpganStrength } from '../../optionsSlice';
|
||||
import {
|
||||
FacetoolType,
|
||||
OptionsState,
|
||||
setCodeformerFidelity,
|
||||
setFacetoolStrength,
|
||||
setFacetoolType,
|
||||
} from '../../optionsSlice';
|
||||
|
||||
import { createSelector } from '@reduxjs/toolkit';
|
||||
import { isEqual } from 'lodash';
|
||||
import { SystemState } from '../../../system/systemSlice';
|
||||
import IAINumberInput from '../../../../common/components/IAINumberInput';
|
||||
import IAISelect from '../../../../common/components/IAISelect';
|
||||
import { FACETOOL_TYPES } from '../../../../app/constants';
|
||||
import { ChangeEvent } from 'react';
|
||||
|
||||
const optionsSelector = createSelector(
|
||||
(state: RootState) => state.options,
|
||||
(options: OptionsState) => {
|
||||
return {
|
||||
gfpganStrength: options.gfpganStrength,
|
||||
facetoolStrength: options.facetoolStrength,
|
||||
facetoolType: options.facetoolType,
|
||||
codeformerFidelity: options.codeformerFidelity,
|
||||
};
|
||||
},
|
||||
{
|
||||
@ -43,13 +54,26 @@ const systemSelector = createSelector(
|
||||
*/
|
||||
const FaceRestoreOptions = () => {
|
||||
const dispatch = useAppDispatch();
|
||||
const { gfpganStrength } = useAppSelector(optionsSelector);
|
||||
const { facetoolStrength, facetoolType, codeformerFidelity } =
|
||||
useAppSelector(optionsSelector);
|
||||
const { isGFPGANAvailable } = useAppSelector(systemSelector);
|
||||
|
||||
const handleChangeStrength = (v: number) => dispatch(setGfpganStrength(v));
|
||||
const handleChangeStrength = (v: number) => dispatch(setFacetoolStrength(v));
|
||||
|
||||
const handleChangeCodeformerFidelity = (v: number) =>
|
||||
dispatch(setCodeformerFidelity(v));
|
||||
|
||||
const handleChangeFacetoolType = (e: ChangeEvent<HTMLSelectElement>) =>
|
||||
dispatch(setFacetoolType(e.target.value as FacetoolType));
|
||||
|
||||
return (
|
||||
<Flex direction={'column'} gap={2}>
|
||||
<IAISelect
|
||||
label="Type"
|
||||
validValues={FACETOOL_TYPES.concat()}
|
||||
value={facetoolType}
|
||||
onChange={handleChangeFacetoolType}
|
||||
/>
|
||||
<IAINumberInput
|
||||
isDisabled={!isGFPGANAvailable}
|
||||
label="Strength"
|
||||
@ -57,10 +81,23 @@ const FaceRestoreOptions = () => {
|
||||
min={0}
|
||||
max={1}
|
||||
onChange={handleChangeStrength}
|
||||
value={gfpganStrength}
|
||||
value={facetoolStrength}
|
||||
width="90px"
|
||||
isInteger={false}
|
||||
/>
|
||||
{facetoolType === 'codeformer' && (
|
||||
<IAINumberInput
|
||||
isDisabled={!isGFPGANAvailable}
|
||||
label="Fidelity"
|
||||
step={0.05}
|
||||
min={0}
|
||||
max={1}
|
||||
onChange={handleChangeCodeformerFidelity}
|
||||
value={codeformerFidelity}
|
||||
width="90px"
|
||||
isInteger={false}
|
||||
/>
|
||||
)}
|
||||
</Flex>
|
||||
);
|
||||
};
|
||||
|
@ -3,9 +3,12 @@ import type { PayloadAction } from '@reduxjs/toolkit';
|
||||
import * as InvokeAI from '../../app/invokeai';
|
||||
import promptToString from '../../common/util/promptToString';
|
||||
import { seedWeightsToString } from '../../common/util/seedWeightPairs';
|
||||
import { FACETOOL_TYPES } from '../../app/constants';
|
||||
|
||||
export type UpscalingLevel = 2 | 4;
|
||||
|
||||
export type FacetoolType = typeof FACETOOL_TYPES[number];
|
||||
|
||||
export interface OptionsState {
|
||||
prompt: string;
|
||||
iterations: number;
|
||||
@ -18,7 +21,9 @@ export interface OptionsState {
|
||||
perlin: number;
|
||||
seed: number;
|
||||
img2imgStrength: number;
|
||||
gfpganStrength: number;
|
||||
facetoolType: FacetoolType;
|
||||
facetoolStrength: number;
|
||||
codeformerFidelity: number;
|
||||
upscalingLevel: UpscalingLevel;
|
||||
upscalingStrength: number;
|
||||
shouldUseInitImage: boolean;
|
||||
@ -31,7 +36,7 @@ export interface OptionsState {
|
||||
variationAmount: number;
|
||||
seedWeights: string;
|
||||
shouldRunESRGAN: boolean;
|
||||
shouldRunGFPGAN: boolean;
|
||||
shouldRunFacetool: boolean;
|
||||
shouldRandomizeSeed: boolean;
|
||||
showAdvancedOptions: boolean;
|
||||
activeTab: number;
|
||||
@ -63,8 +68,10 @@ const initialOptionsState: OptionsState = {
|
||||
shouldRunESRGAN: false,
|
||||
upscalingLevel: 4,
|
||||
upscalingStrength: 0.75,
|
||||
shouldRunGFPGAN: false,
|
||||
gfpganStrength: 0.8,
|
||||
shouldRunFacetool: false,
|
||||
facetoolStrength: 0.8,
|
||||
facetoolType: 'gfpgan',
|
||||
codeformerFidelity: 0.75,
|
||||
shouldRandomizeSeed: true,
|
||||
showAdvancedOptions: true,
|
||||
activeTab: 0,
|
||||
@ -117,8 +124,11 @@ export const optionsSlice = createSlice({
|
||||
setImg2imgStrength: (state, action: PayloadAction<number>) => {
|
||||
state.img2imgStrength = action.payload;
|
||||
},
|
||||
setGfpganStrength: (state, action: PayloadAction<number>) => {
|
||||
state.gfpganStrength = action.payload;
|
||||
setFacetoolStrength: (state, action: PayloadAction<number>) => {
|
||||
state.facetoolStrength = action.payload;
|
||||
},
|
||||
setCodeformerFidelity: (state, action: PayloadAction<number>) => {
|
||||
state.codeformerFidelity = action.payload;
|
||||
},
|
||||
setUpscalingLevel: (state, action: PayloadAction<UpscalingLevel>) => {
|
||||
state.upscalingLevel = action.payload;
|
||||
@ -229,8 +239,8 @@ export const optionsSlice = createSlice({
|
||||
// (postprocess: InvokeAI.PostProcessedImageMetadata) => {
|
||||
// if (postprocess.type === 'gfpgan') {
|
||||
// const { strength } = postprocess;
|
||||
// if (strength) state.gfpganStrength = strength;
|
||||
// state.shouldRunGFPGAN = true;
|
||||
// if (strength) state.facetoolStrength = strength;
|
||||
// state.shouldRunFacetool = true;
|
||||
// postprocessingNotDone = postprocessingNotDone.filter(
|
||||
// (p) => p !== 'gfpgan'
|
||||
// );
|
||||
@ -250,7 +260,7 @@ export const optionsSlice = createSlice({
|
||||
|
||||
// postprocessingNotDone.forEach((p) => {
|
||||
// if (p === 'esrgan') state.shouldRunESRGAN = false;
|
||||
// if (p === 'gfpgan') state.shouldRunGFPGAN = false;
|
||||
// if (p === 'gfpgan') state.shouldRunFacetool = false;
|
||||
// });
|
||||
|
||||
if (prompt) state.prompt = promptToString(prompt);
|
||||
@ -260,7 +270,7 @@ export const optionsSlice = createSlice({
|
||||
if (threshold) state.threshold = threshold;
|
||||
if (typeof threshold === 'undefined') state.threshold = 0;
|
||||
if (perlin) state.perlin = perlin;
|
||||
if (typeof perlin === 'undefined') state.perlin = 0;
|
||||
if (typeof seamless === 'boolean') state.seamless = seamless;
|
||||
if (typeof hires_fix === 'boolean') state.hiresFix = hires_fix;
|
||||
if (width) state.width = width;
|
||||
@ -272,8 +282,11 @@ export const optionsSlice = createSlice({
|
||||
...initialOptionsState,
|
||||
};
|
||||
},
|
||||
setShouldRunGFPGAN: (state, action: PayloadAction<boolean>) => {
|
||||
state.shouldRunGFPGAN = action.payload;
|
||||
setShouldRunFacetool: (state, action: PayloadAction<boolean>) => {
|
||||
state.shouldRunFacetool = action.payload;
|
||||
},
|
||||
setFacetoolType: (state, action: PayloadAction<FacetoolType>) => {
|
||||
state.facetoolType = action.payload;
|
||||
},
|
||||
setShouldRunESRGAN: (state, action: PayloadAction<boolean>) => {
|
||||
state.shouldRunESRGAN = action.payload;
|
||||
@ -310,7 +323,9 @@ export const {
|
||||
setSeamless,
|
||||
setHiresFix,
|
||||
setImg2imgStrength,
|
||||
setGfpganStrength,
|
||||
setFacetoolStrength,
|
||||
setFacetoolType,
|
||||
setCodeformerFidelity,
|
||||
setUpscalingLevel,
|
||||
setUpscalingStrength,
|
||||
setShouldUseInitImage,
|
||||
@ -324,7 +339,7 @@ export const {
|
||||
setSeedWeights,
|
||||
setVariationAmount,
|
||||
setAllParameters,
|
||||
setShouldRunGFPGAN,
|
||||
setShouldRunFacetool,
|
||||
setShouldRunESRGAN,
|
||||
setShouldRandomizeSeed,
|
||||
setShowAdvancedOptions,
|
||||
|
@ -34,6 +34,26 @@ from ldm.invoke.image_util import InitImageResizer
|
||||
from ldm.invoke.devices import choose_torch_device, choose_precision
|
||||
from ldm.invoke.conditioning import get_uc_and_c
|
||||
from ldm.invoke.model_cache import ModelCache
|
||||
from ldm.invoke.seamless import configure_model_padding
|
||||
from ldm.invoke.txt2mask import Txt2Mask, SegmentedGrayscale
|
||||
|
||||
def fix_func(orig):
|
||||
if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
|
||||
def new_func(*args, **kw):
|
||||
device = kw.get("device", "mps")
|
||||
kw["device"]="cpu"
|
||||
return orig(*args, **kw).to(device)
|
||||
return new_func
|
||||
return orig
|
||||
|
||||
torch.rand = fix_func(torch.rand)
|
||||
torch.rand_like = fix_func(torch.rand_like)
|
||||
torch.randn = fix_func(torch.randn)
|
||||
torch.randn_like = fix_func(torch.randn_like)
|
||||
torch.randint = fix_func(torch.randint)
|
||||
torch.randint_like = fix_func(torch.randint_like)
|
||||
torch.bernoulli = fix_func(torch.bernoulli)
|
||||
torch.multinomial = fix_func(torch.multinomial)
|
||||
|
||||
# this is fallback model in case no default is defined
|
||||
FALLBACK_MODEL_NAME='stable-diffusion-1.4'
|
||||
@ -157,6 +177,7 @@ class Generate:
|
||||
self.precision = precision
|
||||
self.strength = 0.75
|
||||
self.seamless = False
|
||||
self.seamless_axes = {'x','y'}
|
||||
self.hires_fix = False
|
||||
self.embedding_path = embedding_path
|
||||
self.model = None # empty for now
|
||||
@ -172,6 +193,7 @@ class Generate:
|
||||
self.esrgan = esrgan
|
||||
self.free_gpu_mem = free_gpu_mem
|
||||
self.size_matters = True # used to warn once about large image sizes and VRAM
|
||||
self.txt2mask = None
|
||||
|
||||
# Note that in previous versions, there was an option to pass the
|
||||
# device to Generate(). However the device was then ignored, so
|
||||
@ -244,6 +266,7 @@ class Generate:
|
||||
height = None,
|
||||
sampler_name = None,
|
||||
seamless = False,
|
||||
seamless_axes = {'x','y'},
|
||||
log_tokenization = False,
|
||||
with_variations = None,
|
||||
variation_amount = 0.0,
|
||||
@ -252,6 +275,7 @@ class Generate:
|
||||
# these are specific to img2img and inpaint
|
||||
init_img = None,
|
||||
init_mask = None,
|
||||
text_mask = None,
|
||||
fit = False,
|
||||
strength = None,
|
||||
init_color = None,
|
||||
@ -264,6 +288,8 @@ class Generate:
|
||||
codeformer_fidelity = None,
|
||||
save_original = False,
|
||||
upscale = None,
|
||||
# this is specific to inpainting and causes more extreme inpainting
|
||||
inpaint_replace = 0.0,
|
||||
# Set this True to handle KeyboardInterrupt internally
|
||||
catch_interrupts = False,
|
||||
hires_fix = False,
|
||||
@ -282,6 +308,8 @@ class Generate:
|
||||
seamless // whether the generated image should tile
|
||||
hires_fix // whether the Hires Fix should be applied during generation
|
||||
init_img // path to an initial image
|
||||
init_mask // path to a mask for the initial image
|
||||
text_mask // a text string that will be used to guide clipseg generation of the init_mask
|
||||
strength // strength for noising/unnoising init_img. 0.0 preserves image exactly, 1.0 replaces it completely
|
||||
facetool_strength // strength for GFPGAN/CodeFormer. 0.0 preserves image exactly, 1.0 replaces it completely
|
||||
ddim_eta // image randomness (eta=0.0 means the same seed always produces the same image)
|
||||
@ -314,6 +342,7 @@ class Generate:
|
||||
width = width or self.width
|
||||
height = height or self.height
|
||||
seamless = seamless or self.seamless
|
||||
seamless_axes = seamless_axes or self.seamless_axes
|
||||
hires_fix = hires_fix or self.hires_fix
|
||||
cfg_scale = cfg_scale or self.cfg_scale
|
||||
ddim_eta = ddim_eta or self.ddim_eta
|
||||
@ -331,10 +360,8 @@ class Generate:
|
||||
# to the width and height of the image training set
|
||||
width = width or self.width
|
||||
height = height or self.height
|
||||
|
||||
for m in model.modules():
|
||||
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
|
||||
m.padding_mode = 'circular' if seamless else m._orig_padding_mode
|
||||
|
||||
configure_model_padding(model, seamless, seamless_axes)
|
||||
|
||||
assert cfg_scale > 1.0, 'CFG_Scale (-C) must be >1.0'
|
||||
assert threshold >= 0.0, '--threshold must be >=0.0'
|
||||
@ -362,6 +389,7 @@ class Generate:
|
||||
f'variation weights must be in [0.0, 1.0]: got {[weight for _, weight in with_variations]}'
|
||||
|
||||
width, height, _ = self._resolution_check(width, height, log=True)
|
||||
assert inpaint_replace >=0.0 and inpaint_replace <= 1.0,'inpaint_replace must be between 0.0 and 1.0'
|
||||
|
||||
if sampler_name and (sampler_name != self.sampler_name):
|
||||
self.sampler_name = sampler_name
|
||||
@ -388,7 +416,10 @@ class Generate:
|
||||
width,
|
||||
height,
|
||||
fit=fit,
|
||||
text_mask=text_mask,
|
||||
)
|
||||
|
||||
# TODO: Hacky selection of operation to perform. Needs to be refactored.
|
||||
if (init_image is not None) and (mask_image is not None):
|
||||
generator = self._make_inpaint()
|
||||
elif (embiggen != None or embiggen_tiles != None):
|
||||
@ -403,6 +434,7 @@ class Generate:
|
||||
generator.set_variation(
|
||||
self.seed, variation_amount, with_variations
|
||||
)
|
||||
|
||||
results = generator.generate(
|
||||
prompt,
|
||||
iterations=iterations,
|
||||
@ -424,6 +456,7 @@ class Generate:
|
||||
perlin=perlin,
|
||||
embiggen=embiggen,
|
||||
embiggen_tiles=embiggen_tiles,
|
||||
inpaint_replace=inpaint_replace,
|
||||
)
|
||||
|
||||
if init_color:
|
||||
@ -599,17 +632,14 @@ class Generate:
|
||||
width,
|
||||
height,
|
||||
fit=False,
|
||||
text_mask=None,
|
||||
):
|
||||
init_image = None
|
||||
init_mask = None
|
||||
if not img:
|
||||
return None, None
|
||||
|
||||
image = self._load_img(
|
||||
img,
|
||||
width,
|
||||
height,
|
||||
)
|
||||
image = self._load_img(img)
|
||||
|
||||
if image.width < self.width and image.height < self.height:
|
||||
print(f'>> WARNING: img2img and inpainting may produce unexpected results with initial images smaller than {self.width}x{self.height} in both dimensions')
|
||||
@ -627,10 +657,12 @@ class Generate:
|
||||
init_image = self._create_init_image(image,width,height,fit=fit) # this returns a torch tensor
|
||||
|
||||
if mask:
|
||||
mask_image = self._load_img(
|
||||
mask, width, height) # this returns an Image
|
||||
mask_image = self._load_img(mask) # this returns an Image
|
||||
init_mask = self._create_init_mask(mask_image,width,height,fit=fit)
|
||||
|
||||
elif text_mask:
|
||||
init_mask = self._txt2mask(image, text_mask, width, height, fit=fit)
|
||||
|
||||
return init_image, init_mask
|
||||
|
||||
def _make_base(self):
|
||||
@ -699,7 +731,7 @@ class Generate:
|
||||
|
||||
seed_everything(random.randrange(0, np.iinfo(np.uint32).max))
|
||||
if self.embedding_path is not None:
|
||||
model.embedding_manager.load(
|
||||
self.model.embedding_manager.load(
|
||||
self.embedding_path, self.precision == 'float32' or self.precision == 'autocast'
|
||||
)
|
||||
|
||||
@ -776,6 +808,23 @@ class Generate:
|
||||
else:
|
||||
r[0] = image
|
||||
|
||||
def apply_textmask(self, image_path:str, prompt:str, callback, threshold:float=0.5):
|
||||
assert os.path.exists(image_path), f'** "{image_path}" not found. Please enter the name of an existing image file to mask **'
|
||||
basename,_ = os.path.splitext(os.path.basename(image_path))
|
||||
if self.txt2mask is None:
|
||||
self.txt2mask = Txt2Mask(device = self.device)
|
||||
segmented = self.txt2mask.segment(image_path,prompt)
|
||||
trans = segmented.to_transparent()
|
||||
inverse = segmented.to_transparent(invert=True)
|
||||
mask = segmented.to_mask(threshold)
|
||||
|
||||
path_filter = re.compile(r'[<>:"/\\|?*]')
|
||||
safe_prompt = path_filter.sub('_', prompt)[:50].rstrip(' .')
|
||||
|
||||
callback(trans,f'{safe_prompt}.deselected',use_prefix=basename)
|
||||
callback(inverse,f'{safe_prompt}.selected',use_prefix=basename)
|
||||
callback(mask,f'{safe_prompt}.masked',use_prefix=basename)
|
||||
|
||||
# to help WebGUI - front end to generator util function
|
||||
def sample_to_image(self, samples):
|
||||
return self._make_base().sample_to_image(samples)
|
||||
@ -808,7 +857,7 @@ class Generate:
|
||||
|
||||
print(msg)
|
||||
|
||||
def _load_img(self, img, width, height)->Image:
|
||||
def _load_img(self, img)->Image:
|
||||
if isinstance(img, Image.Image):
|
||||
image = img
|
||||
print(
|
||||
@ -870,6 +919,29 @@ class Generate:
|
||||
mask = ImageOps.invert(mask)
|
||||
return mask
|
||||
|
||||
# TODO: The latter part of this method repeats code from _create_init_mask()
|
||||
def _txt2mask(self, image:Image, text_mask:list, width, height, fit=True) -> Image:
|
||||
prompt = text_mask[0]
|
||||
confidence_level = text_mask[1] if len(text_mask)>1 else 0.5
|
||||
if self.txt2mask is None:
|
||||
self.txt2mask = Txt2Mask(device = self.device)
|
||||
|
||||
segmented = self.txt2mask.segment(image, prompt)
|
||||
mask = segmented.to_mask(float(confidence_level))
|
||||
mask = mask.convert('RGB')
|
||||
# now we adjust the size
|
||||
if fit:
|
||||
mask = self._fit_image(mask, (width, height))
|
||||
else:
|
||||
mask = self._squeeze_image(mask)
|
||||
mask = mask.resize((mask.width//downsampling, mask.height //
|
||||
downsampling), resample=Image.Resampling.NEAREST)
|
||||
mask = np.array(mask)
|
||||
mask = mask.astype(np.float32) / 255.0
|
||||
mask = mask[None].transpose(0, 3, 1, 2)
|
||||
mask = torch.from_numpy(mask)
|
||||
return mask.to(self.device)
|
||||
|
||||
def _has_transparency(self, image):
|
||||
if image.info.get("transparency", None) is not None:
|
||||
return True
|
||||
|
@ -239,16 +239,17 @@ class Args(object):
|
||||
switches.append(f'--init_color {a["init_color"]}')
|
||||
if a['strength'] and a['strength']>0:
|
||||
switches.append(f'-f {a["strength"]}')
|
||||
if a['inpaint_replace']:
|
||||
switches.append(f'--inpaint_replace')
|
||||
else:
|
||||
switches.append(f'-A {a["sampler_name"]}')
|
||||
|
||||
# facetool-specific parameters
|
||||
if a['facetool']:
|
||||
switches.append(f'-ft {a["facetool"]}')
|
||||
# facetool-specific parameters, only print if running facetool
|
||||
if a['facetool_strength']:
|
||||
switches.append(f'-G {a["facetool_strength"]}')
|
||||
if a['codeformer_fidelity']:
|
||||
switches.append(f'-cf {a["codeformer_fidelity"]}')
|
||||
switches.append(f'-ft {a["facetool"]}')
|
||||
if a["facetool"] == "codeformer":
|
||||
switches.append(f'-cf {a["codeformer_fidelity"]}')
|
||||
|
||||
if a['outcrop']:
|
||||
switches.append(f'-c {" ".join([str(u) for u in a["outcrop"]])}')
|
||||
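To make the switch reconstruction above concrete, here is a rough standalone restatement of the facetool branch for a hypothetical codeformer run (the dict values are invented; the switch letters -G, -ft and -cf come from the code above):

```python
# Hypothetical metadata dict and the switches the facetool branch would emit for it.
a = {'facetool': 'codeformer', 'facetool_strength': 0.8, 'codeformer_fidelity': 0.75}
switches = []
if a['facetool_strength']:
    switches.append(f"-G {a['facetool_strength']}")
    switches.append(f"-ft {a['facetool']}")
    if a['facetool'] == 'codeformer':
        switches.append(f"-cf {a['codeformer_fidelity']}")
print(' '.join(switches))   # -G 0.8 -ft codeformer -cf 0.75
```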
@ -266,11 +267,12 @@ class Args(object):
|
||||
# outpainting parameters
|
||||
if a['out_direction']:
|
||||
switches.append(f'-D {" ".join([str(u) for u in a["out_direction"]])}')
|
||||
|
||||
# LS: slight semantic drift which needs addressing in the future:
|
||||
# 1. Variations come out of the stored metadata as a packed string with the keyword "variations"
|
||||
# 2. However, they come out of the CLI (and probably web) with the keyword "with_variations" and
|
||||
# in broken-out form. Variation (1) should be changed to comply with (2)
|
||||
if a['with_variations']:
|
||||
if a['with_variations'] and len(a['with_variations'])>0:
|
||||
formatted_variations = ','.join(f'{seed}:{weight}' for seed, weight in (a["with_variations"]))
|
||||
switches.append(f'-V {formatted_variations}')
|
||||
if 'variations' in a and len(a['variations'])>0:
|
||||
@ -375,6 +377,14 @@ class Args(object):
|
||||
'--model',
|
||||
help='Indicates which diffusion model to load (defaults to "default" stanza in configs/models.yaml)',
|
||||
)
|
||||
model_group.add_argument(
|
||||
'--png_compression','-z',
|
||||
type=int,
|
||||
default=6,
|
||||
choices=range(0,10),
|
||||
dest='png_compression',
|
||||
help='level of PNG compression, from 0 (none) to 9 (maximum). Default is 6.'
|
||||
)
|
||||
model_group.add_argument(
|
||||
'--sampler',
|
||||
'-A',
|
||||
@ -653,6 +663,14 @@ class Args(object):
|
||||
dest='save_intermediates',
|
||||
help='Save every nth intermediate image into an "intermediates" directory within the output directory'
|
||||
)
|
||||
render_group.add_argument(
|
||||
'--png_compression','-z',
|
||||
type=int,
|
||||
default=6,
|
||||
choices=range(0,10),
|
||||
dest='png_compression',
|
||||
help='level of PNG compression, from 0 (none) to 9 (maximum). Default is 6.'
|
||||
)
|
||||
img2img_group.add_argument(
|
||||
'-I',
|
||||
'--init_img',
|
||||
@ -665,6 +683,14 @@ class Args(object):
|
||||
type=str,
|
||||
help='Path to input mask for inpainting mode (supersedes width and height)',
|
||||
)
|
||||
img2img_group.add_argument(
|
||||
'-tm',
|
||||
'--text_mask',
|
||||
nargs='+',
|
||||
type=str,
|
||||
help='Use the clipseg classifier to generate the mask area for inpainting. Provide a description of the area to mask ("a mug"), optionally followed by the confidence level threshold (0-1.0; defaults to 0.5).',
|
||||
default=None,
|
||||
)
|
||||
img2img_group.add_argument(
|
||||
'--init_color',
|
||||
type=str,
|
||||
@ -700,6 +726,13 @@ class Args(object):
|
||||
metavar=('direction','pixels'),
|
||||
help='Outcrop the image with one or more direction/pixel pairs: -c top 64 bottom 128 left 64 right 64',
|
||||
)
|
||||
img2img_group.add_argument(
|
||||
'-r',
|
||||
'--inpaint_replace',
|
||||
type=float,
|
||||
default=0.0,
|
||||
help='when inpainting, adjust how aggressively to replace the part of the picture under the mask, from 0.0 (a gentle merge) to 1.0 (replace entirely)',
|
||||
)
|
||||
postprocessing_group.add_argument(
|
||||
'-ft',
|
||||
'--facetool',
|
||||
@ -757,6 +790,12 @@ class Args(object):
|
||||
action='store_true',
|
||||
help='Change the model to seamless tiling (circular) mode',
|
||||
)
|
||||
special_effects_group.add_argument(
|
||||
'--seamless_axes',
|
||||
default=['x', 'y'],
|
||||
type=list[str],
|
||||
help='Specify which axes to use circular convolution on.',
|
||||
)
|
||||
variation_group.add_argument(
|
||||
'-v',
|
||||
'--variation_amount',
|
||||
@ -806,7 +845,8 @@ def metadata_dumps(opt,
|
||||
|
||||
# remove any image keys not mentioned in RFC #266
|
||||
rfc266_img_fields = ['type','postprocessing','sampler','prompt','seed','variations','steps',
|
||||
'cfg_scale','threshold','perlin','step_number','width','height','extra','strength']
|
||||
'cfg_scale','threshold','perlin','step_number','width','height','extra','strength',
|
||||
'init_img','init_mask']
|
||||
|
||||
rfc_dict ={}
|
||||
|
||||
@ -827,11 +867,15 @@ def metadata_dumps(opt,
|
||||
# 'variations' should always exist and be an array, empty or consisting of {'seed': seed, 'weight': weight} pairs
|
||||
rfc_dict['variations'] = [{'seed':x[0],'weight':x[1]} for x in opt.with_variations] if opt.with_variations else []
|
||||
|
||||
# if variations are present then we need to replace 'seed' with 'orig_seed'
|
||||
if hasattr(opt,'first_seed'):
|
||||
rfc_dict['seed'] = opt.first_seed
|
||||
|
||||
if opt.init_img:
|
||||
rfc_dict['type'] = 'img2img'
|
||||
rfc_dict['strength_steps'] = rfc_dict.pop('strength')
|
||||
rfc_dict['orig_hash'] = calculate_init_img_hash(opt.init_img)
|
||||
rfc_dict['sampler'] = 'ddim' # TODO: FIX ME WHEN IMG2IMG SUPPORTS ALL SAMPLERS
|
||||
rfc_dict['type'] = 'img2img'
|
||||
rfc_dict['strength_steps'] = rfc_dict.pop('strength')
|
||||
rfc_dict['orig_hash'] = calculate_init_img_hash(opt.init_img)
|
||||
rfc_dict['inpaint_replace'] = opt.inpaint_replace
|
||||
else:
|
||||
rfc_dict['type'] = 'txt2img'
|
||||
rfc_dict.pop('strength')
|
||||
|
@ -5,6 +5,7 @@ including img2img, txt2img, and inpaint
|
||||
import torch
|
||||
import numpy as np
|
||||
import random
|
||||
import os
|
||||
from tqdm import tqdm, trange
|
||||
from PIL import Image
|
||||
from einops import rearrange, repeat
|
||||
@ -168,3 +169,14 @@ class Generator():
|
||||
|
||||
return v2
|
||||
|
||||
# this is a handy routine for debugging use. Given a generated sample,
|
||||
# convert it into a PNG image and store it at the indicated path
|
||||
def save_sample(self, sample, filepath):
|
||||
image = self.sample_to_image(sample)
|
||||
dirname = os.path.dirname(filepath) or '.'
|
||||
if not os.path.exists(dirname):
|
||||
print(f'** creating directory {dirname}')
|
||||
os.makedirs(dirname, exist_ok=True)
|
||||
image.save(filepath,'PNG')
|
||||
|
||||
|
||||
|
@ -18,7 +18,7 @@ class Inpaint(Img2Img):
|
||||
@torch.no_grad()
|
||||
def get_make_image(self,prompt,sampler,steps,cfg_scale,ddim_eta,
|
||||
conditioning,init_image,mask_image,strength,
|
||||
step_callback=None,**kwargs):
|
||||
step_callback=None,inpaint_replace=False,**kwargs):
|
||||
"""
|
||||
Returns a function returning an image derived from the prompt and
|
||||
the initial image + mask. Return value depends on the seed at
|
||||
@ -58,6 +58,14 @@ class Inpaint(Img2Img):
|
||||
noise=x_T
|
||||
)
|
||||
|
||||
# to replace masked area with latent noise, weighted by inpaint_replace strength
|
||||
if inpaint_replace > 0.0:
|
||||
print(f'>> inpaint will replace what was under the mask with a strength of {inpaint_replace}')
|
||||
l_noise = self.get_noise(kwargs['width'],kwargs['height'])
|
||||
inverted_mask = 1.0-mask_image # there will be 1s where the mask is
|
||||
masked_region = (1.0-inpaint_replace) * inverted_mask * z_enc + inpaint_replace * inverted_mask * l_noise
|
||||
z_enc = z_enc * mask_image + masked_region
|
||||
|
||||
# decode it
|
||||
samples = sampler.decode(
|
||||
z_enc,
|
||||
|
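The inpaint_replace branch above is a convex blend inside the masked region; a self-contained restatement is sketched below (tensor names mirror the diff, and shapes are assumed to broadcast as in the original):

```python
import torch

def replace_masked_latents(z_enc: torch.Tensor, mask_image: torch.Tensor,
                           l_noise: torch.Tensor, inpaint_replace: float) -> torch.Tensor:
    """Blend fresh latent noise into the region under the mask.

    mask_image is 1 where the original image is kept and 0 under the mask,
    so inverted_mask is 1 where inpainting happens. inpaint_replace=0.0 keeps
    the encoded latents untouched; 1.0 swaps them entirely for l_noise.
    """
    inverted_mask = 1.0 - mask_image
    masked_region = ((1.0 - inpaint_replace) * inverted_mask * z_enc
                     + inpaint_replace * inverted_mask * l_noise)
    return z_enc * mask_image + masked_region
```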
@ -74,3 +74,4 @@ class Txt2Img(Generator):
|
||||
if self.perlin > 0.0:
|
||||
x = (1-self.perlin)*x + self.perlin*self.get_perlin_noise(width // self.downsampling_factor, height // self.downsampling_factor)
|
||||
return x
|
||||
|
||||
|
@ -33,13 +33,13 @@ class PngWriter:
|
||||
|
||||
# saves image named _image_ to outdir/name, writing metadata from prompt
|
||||
# returns full path of output
|
||||
def save_image_and_prompt_to_png(self, image, dream_prompt, name, metadata=None):
|
||||
def save_image_and_prompt_to_png(self, image, dream_prompt, name, metadata=None, compress_level=6):
|
||||
path = os.path.join(self.outdir, name)
|
||||
info = PngImagePlugin.PngInfo()
|
||||
info.add_text('Dream', dream_prompt)
|
||||
if metadata:
|
||||
info.add_text('sd-metadata', json.dumps(metadata))
|
||||
image.save(path, 'PNG', pnginfo=info)
|
||||
image.save(path, 'PNG', pnginfo=info, compress_level=compress_level)
|
||||
return path
|
||||
|
||||
def retrieve_metadata(self,img_basename):
|
||||
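A minimal usage sketch of the extended signature (the output directory, file name, prompt string and metadata below are placeholders):

```python
from PIL import Image
from ldm.invoke.pngwriter import PngWriter

writer = PngWriter('outputs/img-samples')      # placeholder output directory
image  = Image.new('RGB', (64, 64))            # stand-in for a generated image

path = writer.save_image_and_prompt_to_png(
    image,
    dream_prompt='"a bagel" -s50 -S42',        # placeholder prompt/switch string
    name='000001.42.png',
    metadata={'model': 'stable diffusion'},    # stored under the 'sd-metadata' text chunk
    compress_level=9,                          # 0 = no compression ... 9 = maximum; default is 6
)
print(path)
```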
@ -66,3 +66,43 @@ def write_metadata(img_path:str, meta:dict):
|
||||
info = PngImagePlugin.PngInfo()
|
||||
info.add_text('sd-metadata', json.dumps(meta))
|
||||
im.save(img_path,'PNG',pnginfo=info)
|
||||
|
||||
class PromptFormatter:
|
||||
def __init__(self, t2i, opt):
|
||||
self.t2i = t2i
|
||||
self.opt = opt
|
||||
|
||||
# note: the t2i object should provide all these values.
|
||||
# there should be no need to or against opt values
|
||||
def normalize_prompt(self):
|
||||
"""Normalize the prompt and switches"""
|
||||
t2i = self.t2i
|
||||
opt = self.opt
|
||||
|
||||
switches = list()
|
||||
switches.append(f'"{opt.prompt}"')
|
||||
switches.append(f'-s{opt.steps or t2i.steps}')
|
||||
switches.append(f'-W{opt.width or t2i.width}')
|
||||
switches.append(f'-H{opt.height or t2i.height}')
|
||||
switches.append(f'-C{opt.cfg_scale or t2i.cfg_scale}')
|
||||
switches.append(f'-A{opt.sampler_name or t2i.sampler_name}')
|
||||
# to do: put model name into the t2i object
|
||||
# switches.append(f'--model{t2i.model_name}')
|
||||
if opt.seamless or t2i.seamless:
|
||||
switches.append(f'--seamless')
|
||||
if opt.init_img:
|
||||
switches.append(f'-I{opt.init_img}')
|
||||
if opt.fit:
|
||||
switches.append(f'--fit')
|
||||
if opt.strength and opt.init_img is not None:
|
||||
switches.append(f'-f{opt.strength or t2i.strength}')
|
||||
if opt.gfpgan_strength:
|
||||
switches.append(f'-G{opt.gfpgan_strength}')
|
||||
if opt.upscale:
|
||||
switches.append(f'-U {" ".join([str(u) for u in opt.upscale])}')
|
||||
if opt.variation_amount > 0:
|
||||
switches.append(f'-v{opt.variation_amount}')
|
||||
if opt.with_variations:
|
||||
formatted_variations = ','.join(f'{seed}:{weight}' for seed, weight in opt.with_variations)
|
||||
switches.append(f'-V{formatted_variations}')
|
||||
return ' '.join(switches)
|
||||
|
@ -52,8 +52,12 @@ COMMANDS = (
|
||||
'--skip_normalize','-x',
|
||||
'--log_tokenization','-t',
|
||||
'--hires_fix',
|
||||
'--inpaint_replace','-r',
|
||||
'--png_compression','-z',
|
||||
'--text_mask','-tm',
|
||||
'!fix','!fetch','!history','!search','!clear',
|
||||
'!models','!switch','!import_model','!edit_model','!del_model',
|
||||
'!mask',
|
||||
)
|
||||
MODEL_COMMANDS = (
|
||||
'!switch',
|
||||
@ -69,6 +73,7 @@ IMG_PATH_COMMANDS = (
|
||||
IMG_FILE_COMMANDS=(
|
||||
'!fix',
|
||||
'!fetch',
|
||||
'!mask',
|
||||
'--init_img[=\s]','-I',
|
||||
'--init_mask[=\s]','-M',
|
||||
'--init_color[=\s]',
|
||||
|
@ -41,10 +41,12 @@ class CodeFormerRestoration():
|
||||
cf.eval()
|
||||
|
||||
image = image.convert('RGB')
|
||||
# Codeformer expects a BGR np array; make array and flip channels
|
||||
bgr_image_array = np.array(image, dtype=np.uint8)[...,::-1]
|
||||
|
||||
face_helper = FaceRestoreHelper(upscale_factor=1, use_parse=True, device=device)
|
||||
face_helper.clean_all()
|
||||
face_helper.read_image(np.array(image, dtype=np.uint8))
|
||||
face_helper.read_image(bgr_image_array)
|
||||
face_helper.get_face_landmarks_5(resize=640, eye_dist_threshold=5)
|
||||
face_helper.align_warp_face()
|
||||
|
||||
@ -71,7 +73,8 @@ class CodeFormerRestoration():
|
||||
|
||||
restored_img = face_helper.paste_faces_to_input_image()
|
||||
|
||||
res = Image.fromarray(restored_img)
|
||||
# Flip the channels back to RGB
|
||||
res = Image.fromarray(restored_img[...,::-1])
|
||||
|
||||
if strength < 1.0:
|
||||
# Resize the image to the new image if the sizes have changed
|
||||
|
@ -55,13 +55,18 @@ class GFPGAN():
|
||||
|
||||
image = image.convert('RGB')
|
||||
|
||||
# GFPGAN expects a BGR np array; make array and flip channels
|
||||
bgr_image_array = np.array(image, dtype=np.uint8)[...,::-1]
|
||||
|
||||
_, _, restored_img = self.gfpgan.enhance(
|
||||
np.array(image, dtype=np.uint8),
|
||||
bgr_image_array,
|
||||
has_aligned=False,
|
||||
only_center_face=False,
|
||||
paste_back=True,
|
||||
)
|
||||
res = Image.fromarray(restored_img)
|
||||
|
||||
# Flip the channels back to RGB
|
||||
res = Image.fromarray(restored_img[...,::-1])
|
||||
|
||||
if strength < 1.0:
|
||||
# Resize the image to the new image if the sizes have changed
|
||||
|
@ -60,14 +60,18 @@ class ESRGAN():
|
||||
print(
|
||||
f'>> Real-ESRGAN Upscaling seed:{seed} : scale:{upsampler_scale}x'
|
||||
)
|
||||
|
||||
# Real-ESRGAN expects a BGR np array; make array and flip channels
|
||||
bgr_image_array = np.array(image, dtype=np.uint8)[...,::-1]
|
||||
|
||||
output, _ = upsampler.enhance(
|
||||
np.array(image, dtype=np.uint8),
|
||||
bgr_image_array,
|
||||
outscale=upsampler_scale,
|
||||
alpha_upsampler='realesrgan',
|
||||
)
|
||||
|
||||
res = Image.fromarray(output)
|
||||
# Flip the channels back to RGB
|
||||
res = Image.fromarray(output[...,::-1])
|
||||
|
||||
if strength < 1.0:
|
||||
# Resize the image to the new image if the sizes have changed
|
||||
|
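All three restorers above use the same idiom for the colour conversion: build a uint8 array from the PIL image and reverse the last axis. A standalone sketch of that round trip (the function names are ours, not from the diff):

```python
import numpy as np
from PIL import Image

def pil_to_bgr_array(image: Image.Image) -> np.ndarray:
    """PIL images are RGB; reversing the channel axis yields the BGR layout
    that GFPGAN, CodeFormer and Real-ESRGAN expect."""
    return np.array(image.convert('RGB'), dtype=np.uint8)[..., ::-1]

def bgr_array_to_pil(bgr: np.ndarray) -> Image.Image:
    """Reverse the channel axis again to get back to RGB before wrapping in PIL."""
    return Image.fromarray(bgr[..., ::-1])
```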
30
ldm/invoke/seamless.py
Normal file
@ -0,0 +1,30 @@
|
||||
import torch.nn as nn
|
||||
|
||||
def _conv_forward_asymmetric(self, input, weight, bias):
|
||||
"""
|
||||
Patch for Conv2d._conv_forward that supports asymmetric padding
|
||||
"""
|
||||
working = nn.functional.pad(input, self.asymmetric_padding['x'], mode=self.asymmetric_padding_mode['x'])
|
||||
working = nn.functional.pad(working, self.asymmetric_padding['y'], mode=self.asymmetric_padding_mode['y'])
|
||||
return nn.functional.conv2d(working, weight, bias, self.stride, nn.modules.utils._pair(0), self.dilation, self.groups)
|
||||
|
||||
def configure_model_padding(model, seamless, seamless_axes):
|
||||
"""
|
||||
Modifies the 2D convolution layers to use a circular padding mode based on the `seamless` and `seamless_axes` options.
|
||||
"""
|
||||
for m in model.modules():
|
||||
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
|
||||
if seamless:
|
||||
m.asymmetric_padding_mode = {}
|
||||
m.asymmetric_padding = {}
|
||||
m.asymmetric_padding_mode['x'] = 'circular' if ('x' in seamless_axes) else 'constant'
|
||||
m.asymmetric_padding['x'] = (m._reversed_padding_repeated_twice[0], m._reversed_padding_repeated_twice[1], 0, 0)
|
||||
m.asymmetric_padding_mode['y'] = 'circular' if ('y' in seamless_axes) else 'constant'
|
||||
m.asymmetric_padding['y'] = (0, 0, m._reversed_padding_repeated_twice[2], m._reversed_padding_repeated_twice[3])
|
||||
m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d)
|
||||
else:
|
||||
m._conv_forward = nn.Conv2d._conv_forward.__get__(m, nn.Conv2d)
|
||||
if hasattr(m, 'asymmetric_padding_mode'):
|
||||
del m.asymmetric_padding_mode
|
||||
if hasattr(m, 'asymmetric_padding'):
|
||||
del m.asymmetric_padding
|
246
ldm/invoke/server_legacy.py
Normal file
@ -0,0 +1,246 @@
|
||||
import argparse
|
||||
import json
|
||||
import base64
|
||||
import mimetypes
|
||||
import os
|
||||
from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
|
||||
from ldm.invoke.pngwriter import PngWriter, PromptFormatter
|
||||
from threading import Event
|
||||
|
||||
def build_opt(post_data, seed, gfpgan_model_exists):
|
||||
opt = argparse.Namespace()
|
||||
setattr(opt, 'prompt', post_data['prompt'])
|
||||
setattr(opt, 'init_img', post_data['initimg'])
|
||||
setattr(opt, 'strength', float(post_data['strength']))
|
||||
setattr(opt, 'iterations', int(post_data['iterations']))
|
||||
setattr(opt, 'steps', int(post_data['steps']))
|
||||
setattr(opt, 'width', int(post_data['width']))
|
||||
setattr(opt, 'height', int(post_data['height']))
|
||||
setattr(opt, 'seamless', 'seamless' in post_data)
|
||||
setattr(opt, 'fit', 'fit' in post_data)
|
||||
setattr(opt, 'mask', 'mask' in post_data)
|
||||
setattr(opt, 'invert_mask', 'invert_mask' in post_data)
|
||||
setattr(opt, 'cfg_scale', float(post_data['cfg_scale']))
|
||||
setattr(opt, 'sampler_name', post_data['sampler_name'])
|
||||
setattr(opt, 'gfpgan_strength', float(post_data['gfpgan_strength']) if gfpgan_model_exists else 0)
|
||||
setattr(opt, 'upscale', [int(post_data['upscale_level']), float(post_data['upscale_strength'])] if post_data['upscale_level'] != '' else None)
|
||||
setattr(opt, 'progress_images', 'progress_images' in post_data)
|
||||
setattr(opt, 'seed', None if int(post_data['seed']) == -1 else int(post_data['seed']))
|
||||
setattr(opt, 'variation_amount', float(post_data['variation_amount']) if int(post_data['seed']) != -1 else 0)
|
||||
setattr(opt, 'with_variations', [])
|
||||
|
||||
broken = False
|
||||
if int(post_data['seed']) != -1 and post_data['with_variations'] != '':
|
||||
for part in post_data['with_variations'].split(','):
|
||||
seed_and_weight = part.split(':')
|
||||
if len(seed_and_weight) != 2:
|
||||
print(f'could not parse with_variation part "{part}"')
|
||||
broken = True
|
||||
break
|
||||
try:
|
||||
seed = int(seed_and_weight[0])
|
||||
weight = float(seed_and_weight[1])
|
||||
except ValueError:
|
||||
print(f'could not parse with_variation part "{part}"')
|
||||
broken = True
|
||||
break
|
||||
opt.with_variations.append([seed, weight])
|
||||
|
||||
if broken:
|
||||
raise CanceledException
|
||||
|
||||
if len(opt.with_variations) == 0:
|
||||
opt.with_variations = None
|
||||
|
||||
return opt
|
||||
|
||||
class CanceledException(Exception):
|
||||
pass
|
||||
|
||||
class DreamServer(BaseHTTPRequestHandler):
|
||||
model = None
|
||||
outdir = None
|
||||
canceled = Event()
|
||||
|
||||
def do_GET(self):
|
||||
if self.path == "/":
|
||||
self.send_response(200)
|
||||
self.send_header("Content-type", "text/html")
|
||||
self.end_headers()
|
||||
with open("./static/dream_web/index.html", "rb") as content:
|
||||
self.wfile.write(content.read())
|
||||
elif self.path == "/config.js":
|
||||
# unfortunately this import can't be at the top level, since that would cause a circular import
|
||||
from ldm.gfpgan.gfpgan_tools import gfpgan_model_exists
|
||||
self.send_response(200)
|
||||
self.send_header("Content-type", "application/javascript")
|
||||
self.end_headers()
|
||||
config = {
|
||||
'gfpgan_model_exists': gfpgan_model_exists
|
||||
}
|
||||
self.wfile.write(bytes("let config = " + json.dumps(config) + ";\n", "utf-8"))
|
||||
elif self.path == "/run_log.json":
|
||||
self.send_response(200)
|
||||
self.send_header("Content-type", "application/json")
|
||||
self.end_headers()
|
||||
output = []
|
||||
|
||||
log_file = os.path.join(self.outdir, "dream_web_log.txt")
|
||||
if os.path.exists(log_file):
|
||||
with open(log_file, "r") as log:
|
||||
for line in log:
|
||||
url, config = line.split(": {", maxsplit=1)
|
||||
config = json.loads("{" + config)
|
||||
config["url"] = url.lstrip(".")
|
||||
if os.path.exists(url):
|
||||
output.append(config)
|
||||
|
||||
self.wfile.write(bytes(json.dumps({"run_log": output}), "utf-8"))
|
||||
elif self.path == "/cancel":
|
||||
self.canceled.set()
|
||||
self.send_response(200)
|
||||
self.send_header("Content-type", "application/json")
|
||||
self.end_headers()
|
||||
self.wfile.write(bytes('{}', 'utf8'))
|
||||
else:
|
||||
path = "." + self.path
|
||||
cwd = os.path.realpath(os.getcwd())
|
||||
is_in_cwd = os.path.commonprefix((os.path.realpath(path), cwd)) == cwd
|
||||
if not (is_in_cwd and os.path.exists(path)):
|
||||
self.send_response(404)
|
||||
return
|
||||
mime_type = mimetypes.guess_type(path)[0]
|
||||
if mime_type is not None:
|
||||
self.send_response(200)
|
||||
self.send_header("Content-type", mime_type)
|
||||
self.end_headers()
|
||||
with open("." + self.path, "rb") as content:
|
||||
self.wfile.write(content.read())
|
||||
else:
|
||||
self.send_response(404)
|
||||
|
||||
def do_POST(self):
|
||||
self.send_response(200)
|
||||
self.send_header("Content-type", "application/json")
|
||||
self.end_headers()
|
||||
|
||||
# unfortunately this import can't be at the top level, since that would cause a circular import
|
||||
# TODO temporarily commented out, import fails for some reason
|
||||
# from ldm.gfpgan.gfpgan_tools import gfpgan_model_exists
|
||||
gfpgan_model_exists = False
|
||||
|
||||
content_length = int(self.headers['Content-Length'])
|
||||
post_data = json.loads(self.rfile.read(content_length))
|
||||
opt = build_opt(post_data, self.model.seed, gfpgan_model_exists)
|
||||
|
||||
self.canceled.clear()
|
||||
print(f">> Request to generate with prompt: {opt.prompt}")
|
||||
# In order to handle upscaled images, the PngWriter needs to maintain state
|
||||
# across images generated by each call to prompt2img(), so we define it in
|
||||
# the outer scope of image_done()
|
||||
config = post_data.copy() # Shallow copy
|
||||
config['initimg'] = config.pop('initimg_name', '')
|
||||
|
||||
images_generated = 0 # helps keep track of when upscaling is started
|
||||
images_upscaled = 0 # helps keep track of when upscaling is completed
|
||||
pngwriter = PngWriter(self.outdir)
|
||||
|
||||
prefix = pngwriter.unique_prefix()
|
||||
# if upscaling is requested, then this will be called twice, once when
|
||||
# the images are first generated, and then again when after upscaling
|
||||
# is complete. The upscaling replaces the original file, so the second
|
||||
# entry should not be inserted into the image list.
|
||||
def image_done(image, seed, upscaled=False, first_seed=-1, use_prefix=None):
|
||||
print(f'First seed: {first_seed}')
|
||||
name = f'{prefix}.{seed}.png'
|
||||
iter_opt = argparse.Namespace(**vars(opt)) # copy
|
||||
if opt.variation_amount > 0:
|
||||
this_variation = [[seed, opt.variation_amount]]
|
||||
if opt.with_variations is None:
|
||||
iter_opt.with_variations = this_variation
|
||||
else:
|
||||
iter_opt.with_variations = opt.with_variations + this_variation
|
||||
iter_opt.variation_amount = 0
|
||||
elif opt.with_variations is None:
|
||||
iter_opt.seed = seed
|
||||
normalized_prompt = PromptFormatter(self.model, iter_opt).normalize_prompt()
|
||||
path = pngwriter.save_image_and_prompt_to_png(image, f'{normalized_prompt} -S{iter_opt.seed}', name)
|
||||
|
||||
if int(config['seed']) == -1:
|
||||
config['seed'] = seed
|
||||
# Append post_data to log, but only once!
|
||||
if not upscaled:
|
||||
with open(os.path.join(self.outdir, "dream_web_log.txt"), "a") as log:
|
||||
log.write(f"{path}: {json.dumps(config)}\n")
|
||||
|
||||
self.wfile.write(bytes(json.dumps(
|
||||
{'event': 'result', 'url': path, 'seed': seed, 'config': config}
|
||||
) + '\n',"utf-8"))
|
||||
|
||||
# control state of the "postprocessing..." message
|
||||
upscaling_requested = opt.upscale or opt.gfpgan_strength > 0
|
||||
nonlocal images_generated # NB: Is this bad python style? It is typical usage in a perl closure.
|
||||
nonlocal images_upscaled # NB: Is this bad python style? It is typical usage in a perl closure.
|
||||
if upscaled:
|
||||
images_upscaled += 1
|
||||
else:
|
||||
images_generated += 1
|
||||
if upscaling_requested:
|
||||
action = None
|
||||
if images_generated >= opt.iterations:
|
||||
if images_upscaled < opt.iterations:
|
||||
action = 'upscaling-started'
|
||||
else:
|
||||
action = 'upscaling-done'
|
||||
if action:
|
||||
x = images_upscaled + 1
|
||||
self.wfile.write(bytes(json.dumps(
|
||||
{'event': action, 'processed_file_cnt': f'{x}/{opt.iterations}'}
|
||||
) + '\n',"utf-8"))
|
||||
|
||||
step_writer = PngWriter(os.path.join(self.outdir, "intermediates"))
|
||||
step_index = 1
|
||||
def image_progress(sample, step):
|
||||
if self.canceled.is_set():
|
||||
self.wfile.write(bytes(json.dumps({'event':'canceled'}) + '\n', 'utf-8'))
|
||||
raise CanceledException
|
||||
path = None
|
||||
# since rendering images is moderately expensive, only render every 5th image
|
||||
# and don't bother with the last one, since it'll render anyway
|
||||
nonlocal step_index
|
||||
if opt.progress_images and step % 5 == 0 and step < opt.steps - 1:
|
||||
image = self.model.sample_to_image(sample)
|
||||
name = f'{prefix}.{opt.seed}.{step_index}.png'
|
||||
metadata = f'{opt.prompt} -S{opt.seed} [intermediate]'
|
||||
path = step_writer.save_image_and_prompt_to_png(image, metadata, name)
|
||||
step_index += 1
|
||||
self.wfile.write(bytes(json.dumps(
|
||||
{'event': 'step', 'step': step + 1, 'url': path}
|
||||
) + '\n',"utf-8"))
|
||||
|
||||
try:
|
||||
if opt.init_img is None:
|
||||
# Run txt2img
|
||||
self.model.prompt2image(**vars(opt), step_callback=image_progress, image_callback=image_done)
|
||||
else:
|
||||
# Decode initimg as base64 to temp file
|
||||
with open("./img2img-tmp.png", "wb") as f:
|
||||
initimg = opt.init_img.split(",")[1] # Ignore mime type
|
||||
f.write(base64.b64decode(initimg))
|
||||
opt1 = argparse.Namespace(**vars(opt))
|
||||
opt1.init_img = "./img2img-tmp.png"
|
||||
|
||||
try:
|
||||
# Run img2img
|
||||
self.model.prompt2image(**vars(opt1), step_callback=image_progress, image_callback=image_done)
|
||||
finally:
|
||||
# Remove the temp file
|
||||
os.remove("./img2img-tmp.png")
|
||||
except CanceledException:
|
||||
print(f"Canceled.")
|
||||
return
|
||||
|
||||
|
||||
class ThreadingDreamServer(ThreadingHTTPServer):
|
||||
def __init__(self, server_address):
|
||||
super(ThreadingDreamServer, self).__init__(server_address, DreamServer)
|
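For orientation, here is a rough client-side sketch of the protocol this legacy server speaks: POST the generation fields as JSON and read back newline-delimited JSON events ('step', 'result', plus the upscaling progress events). The field names come from build_opt(); the host, port and values are placeholders, and the requests package is assumed to be available.

```python
import json
import requests  # any HTTP client that can stream the response works

payload = {
    'prompt': 'a bagel on a plate', 'initimg': None, 'initimg_name': '',
    'strength': 0.75, 'iterations': 1, 'steps': 30,
    'width': 512, 'height': 512, 'cfg_scale': 7.5, 'sampler_name': 'k_lms',
    'gfpgan_strength': 0.0, 'upscale_level': '', 'upscale_strength': 0.75,
    'seed': -1, 'variation_amount': 0.0, 'with_variations': '',
    # boolean options such as 'seamless', 'fit' or 'progress_images' are
    # enabled simply by including the key in the payload
}

with requests.post('http://localhost:9090', json=payload, stream=True) as resp:
    for line in resp.iter_lines():
        if not line:
            continue
        event = json.loads(line)
        if event['event'] == 'step':
            print('step', event['step'])
        elif event['event'] == 'result':
            print('wrote', event['url'], 'with seed', event['seed'])
```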
131
ldm/invoke/txt2mask.py
Normal file
@ -0,0 +1,131 @@
|
||||
'''Makes available the Txt2Mask class, which assists in the automatic
|
||||
assignment of masks via text prompt using clipseg.
|
||||
|
||||
Here is typical usage:
|
||||
|
||||
from ldm.invoke.txt2mask import Txt2Mask, SegmentedGrayscale
|
||||
from PIL import Image
|
||||
|
||||
txt2mask = Txt2Mask(self.device)
|
||||
segmented = txt2mask.segment(Image.open('/path/to/img.png'),'a bagel')
|
||||
|
||||
# this will return a grayscale Image of the segmented data
|
||||
grayscale = segmented.to_grayscale()
|
||||
|
||||
# this will return a semi-transparent image in which the
|
||||
# selected object(s) are opaque and the rest is at various
|
||||
# levels of transparency
|
||||
transparent = segmented.to_transparent()
|
||||
|
||||
# this will return a masked image suitable for use in inpainting:
|
||||
mask = segmented.to_mask(threshold=0.5)
|
||||
|
||||
The threshold used in the call to to_mask() selects pixels for use in
|
||||
the mask that exceed the indicated confidence threshold. Values range
|
||||
from 0.0 to 1.0. The higher the threshold, the more confident the
|
||||
algorithm is. In limited testing, I have found that values around 0.5
|
||||
work fine.
|
||||
'''
|
||||
|
||||
import torch
|
||||
import numpy as np
|
||||
from clipseg_models.clipseg import CLIPDensePredT
|
||||
from einops import rearrange, repeat
|
||||
from PIL import Image, ImageOps
|
||||
from torchvision import transforms
|
||||
|
||||
CLIP_VERSION = 'ViT-B/16'
|
||||
CLIPSEG_WEIGHTS = 'src/clipseg/weights/rd64-uni.pth'
|
||||
CLIPSEG_SIZE = 352
|
||||
|
||||
class SegmentedGrayscale(object):
|
||||
def __init__(self, image:Image, heatmap:torch.Tensor):
|
||||
self.heatmap = heatmap
|
||||
self.image = image
|
||||
|
||||
def to_grayscale(self)->Image:
|
||||
return self._rescale(Image.fromarray(np.uint8(self.heatmap*255)))
|
||||
|
||||
def to_mask(self,threshold:float=0.5)->Image:
|
||||
discrete_heatmap = self.heatmap.lt(threshold).int()
|
||||
return self._rescale(Image.fromarray(np.uint8(discrete_heatmap*255),mode='L'))
|
||||
|
||||
def to_transparent(self,invert:bool=False)->Image:
|
||||
transparent_image = self.image.copy()
|
||||
gs = self.to_grayscale()
|
||||
# The following line looks like a bug, but isn't.
|
||||
# For img2img, we want the selected regions to be transparent,
|
||||
# but to_grayscale() returns the opposite.
|
||||
gs = ImageOps.invert(gs) if not invert else gs
|
||||
transparent_image.putalpha(gs)
|
||||
return transparent_image
|
||||
|
||||
# unscales and uncrops the 352x352 heatmap so that it matches the image again
|
||||
def _rescale(self, heatmap:Image)->Image:
|
||||
size = self.image.width if (self.image.width > self.image.height) else self.image.height
|
||||
resized_image = heatmap.resize(
|
||||
(size,size),
|
||||
resample=Image.Resampling.LANCZOS
|
||||
)
|
||||
return resized_image.crop((0,0,self.image.width,self.image.height))
|
||||
|
||||
class Txt2Mask(object):
|
||||
'''
|
||||
Create new Txt2Mask object. The optional device argument can be one of
|
||||
'cuda', 'mps' or 'cpu'.
|
||||
'''
|
||||
def __init__(self,device='cpu'):
|
||||
print('>> Initializing clipseg model for text to mask inference')
|
||||
self.device = device
|
||||
self.model = CLIPDensePredT(version=CLIP_VERSION, reduce_dim=64, )
|
||||
self.model.eval()
|
||||
# initially we keep everything in cpu to conserve space
|
||||
self.model.to('cpu')
|
||||
self.model.load_state_dict(torch.load(CLIPSEG_WEIGHTS, map_location=torch.device('cpu')), strict=False)
|
||||
|
||||
@torch.no_grad()
|
||||
def segment(self, image, prompt:str) -> SegmentedGrayscale:
|
||||
'''
|
||||
Given a prompt string such as "a bagel", tries to identify the object in the
|
||||
provided image and returns a SegmentedGrayscale object in which the brighter
|
||||
pixels indicate where the object is inferred to be.
|
||||
'''
|
||||
self._to_device(self.device)
|
||||
prompts = [prompt] # right now we operate on just a single prompt at a time
|
||||
|
||||
transform = transforms.Compose([
|
||||
transforms.ToTensor(),
|
||||
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
|
||||
transforms.Resize((CLIPSEG_SIZE, CLIPSEG_SIZE)), # must be multiple of 64...
|
||||
])
|
||||
|
||||
if type(image) is str:
|
||||
image = Image.open(image).convert('RGB')
|
||||
|
||||
image = ImageOps.exif_transpose(image)
|
||||
img = self._scale_and_crop(image)
|
||||
img = transform(img).unsqueeze(0)
|
||||
|
||||
preds = self.model(img.repeat(len(prompts),1,1,1), prompts)[0]
|
||||
heatmap = torch.sigmoid(preds[0][0]).cpu()
|
||||
self._to_device('cpu')
|
||||
return SegmentedGrayscale(image, heatmap)
|
||||
|
||||
def _to_device(self, device):
|
||||
self.model.to(device)
|
||||
|
||||
def _scale_and_crop(self, image:Image)->Image:
|
||||
scaled_image = Image.new('RGB',(CLIPSEG_SIZE,CLIPSEG_SIZE))
|
||||
if image.width > image.height: # width is constraint
|
||||
scale = CLIPSEG_SIZE / image.width
|
||||
else:
|
||||
scale = CLIPSEG_SIZE / image.height
|
||||
scaled_image.paste(
|
||||
image.resize(
|
||||
(int(scale * image.width),
|
||||
int(scale * image.height)
|
||||
),
|
||||
resample=Image.Resampling.LANCZOS
|
||||
),box=(0,0)
|
||||
)
|
||||
return scaled_image
|
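To make the flow above concrete, here is a short usage sketch, not part of the new file, assembled from the module docstring and the methods defined above; the image path and the prompt 'the dog' are placeholders:

    from PIL import Image
    from ldm.invoke.txt2mask import Txt2Mask

    txt2mask = Txt2Mask(device='cpu')          # 'cuda', 'mps' or 'cpu'
    segmented = txt2mask.segment(Image.open('photo.png'), 'the dog')

    segmented.to_grayscale().save('photo.heatmap.png')       # brighter = higher confidence
    segmented.to_mask(threshold=0.5).save('photo.mask.png')  # thresholded at 0.5, as the docstring suggests
    segmented.to_transparent().save('photo.init.png')        # selected region made transparent for img2img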
@ -1353,7 +1353,7 @@ class LatentDiffusion(DDPM):
|
||||
num_downs = self.first_stage_model.encoder.num_resolutions - 1
|
||||
rescale_latent = 2 ** (num_downs)
|
||||
|
||||
# get top left postions of patches as conforming for the bbbox tokenizer, therefore we
|
||||
# get top left positions of patches as conforming for the bbbox tokenizer, therefore we
|
||||
# need to rescale the tl patch coordinates to be in between (0,1)
|
||||
tl_patch_coordinates = [
|
||||
(
|
||||
|
@ -64,7 +64,8 @@ def make_ddim_timesteps(
|
||||
):
|
||||
if ddim_discr_method == 'uniform':
|
||||
c = num_ddpm_timesteps // num_ddim_timesteps
|
||||
ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
|
||||
# ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
|
||||
ddim_timesteps = (np.arange(0, num_ddim_timesteps) * c).astype(int)
|
||||
elif ddim_discr_method == 'quad':
|
||||
ddim_timesteps = (
|
||||
(
|
||||
@ -81,8 +82,8 @@ def make_ddim_timesteps(
|
||||
|
||||
# assert ddim_timesteps.shape[0] == num_ddim_timesteps
|
||||
# add one to get the final alpha values right (the ones from first scale to data during sampling)
|
||||
# steps_out = ddim_timesteps + 1
|
||||
steps_out = ddim_timesteps
|
||||
steps_out = ddim_timesteps + 1
|
||||
# steps_out = ddim_timesteps
|
||||
|
||||
if verbose:
|
||||
print(f'Selected timesteps for ddim sampler: {steps_out}')
|
||||
|
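The practical effect of the two changes above is easiest to see with concrete numbers; the sketch below picks 1000 DDPM timesteps and 7 DDIM steps purely for illustration:

    import numpy as np

    num_ddpm_timesteps, num_ddim_timesteps = 1000, 7
    c = num_ddpm_timesteps // num_ddim_timesteps              # 142

    old = np.asarray(list(range(0, num_ddpm_timesteps, c)))   # [0 142 ... 852 994] -> 8 values, one too many
    new = (np.arange(0, num_ddim_timesteps) * c).astype(int)  # [0 142 ... 852]     -> exactly 7 values

    steps_out = new + 1  # the restored "+ 1" keeps the final alpha values right, per the comment above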
15
mkdocs.yml
@ -1,12 +1,12 @@
|
||||
# General
|
||||
site_name: Dream Script Docs
|
||||
site_url: https://lstein.github.io/stable-diffusion/
|
||||
site_name: Stable Diffusion Toolkit Docs
|
||||
site_url: https://invoke-ai.github.io/InvokeAI
|
||||
site_author: mauwii
|
||||
dev_addr: "127.0.0.1:8080"
|
||||
dev_addr: '127.0.0.1:8080'
|
||||
|
||||
# Repository
|
||||
repo_name: lstein/stable-diffusion
|
||||
repo_url: https://github.com/lstein/stable-diffusion
|
||||
repo_name: 'invoke-ai/InvokeAI'
|
||||
repo_url: 'https://github.com/invoke-ai/InvokeAI'
|
||||
edit_uri: edit/main/docs/
|
||||
|
||||
# Copyright
|
||||
@ -26,6 +26,7 @@ theme:
|
||||
name: Switch to dark mode
|
||||
- media: '(prefers-color-scheme: dark)'
|
||||
scheme: slate
|
||||
primary: blue
|
||||
toggle:
|
||||
icon: material/lightbulb-outline
|
||||
name: Switch to light mode
|
||||
@ -55,8 +56,8 @@ markdown_extensions:
|
||||
- pymdownx.keys
|
||||
- pymdownx.magiclink:
|
||||
repo_url_shorthand: true
|
||||
user: 'lstein'
|
||||
repo: 'stable-diffusion'
|
||||
user: 'invoke-ai'
|
||||
repo: 'InvokeAI'
|
||||
- pymdownx.mark
|
||||
- pymdownx.smartsymbols
|
||||
- pymdownx.superfences:
|
||||
|
@ -52,14 +52,14 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%%cmd\n",
|
||||
"pew new --python 3.10 -r requirements-lin-win-colab-CUDA.txt --dont-activate invoke-ai"
|
||||
"pew new --python 3.10 -r requirements-lin-win-colab-CUDA.txt --dont-activate invokeai"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Switch the notebook kernel to the new 'invoke-ai' environment!\n",
|
||||
"# Switch the notebook kernel to the new 'invokeai' environment!\n",
|
||||
"\n",
|
||||
"## VSCode: restart VSCode and come back to this cell\n",
|
||||
"\n",
|
||||
@ -67,7 +67,7 @@
|
||||
"1. Type \"Select Interpreter\" and select \"Jupyter: Select Interpreter to Start Jupyter Server\"\n",
|
||||
"1. VSCode will say that it needs to install packages. Click the \"Install\" button.\n",
|
||||
"1. Once the install is finished, do 1 & 2 again\n",
|
||||
"1. Pick 'invoke-ai'\n",
|
||||
"1. Pick 'invokeai'\n",
|
||||
"1. Run the following cell"
|
||||
]
|
||||
},
|
||||
@ -88,7 +88,7 @@
|
||||
"## Jupyter/JupyterLab\n",
|
||||
"\n",
|
||||
"1. Run the cell below\n",
|
||||
"1. Click on the toolbar where it says \"(ipyknel)\" ↗️. You should get a pop-up asking you to \"Select Kernel\". Pick 'invoke-ai' from the drop-down.\n"
|
||||
"1. Click on the toolbar where it says \"(ipyknel)\" ↗️. You should get a pop-up asking you to \"Select Kernel\". Pick 'invokeai' from the drop-down.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -106,9 +106,9 @@
|
||||
"source": [
|
||||
"# DO NOT RUN THIS CELL IF YOU ARE USING VSCODE!!\n",
|
||||
"%%cmd\n",
|
||||
"pew workon invoke-ai\n",
|
||||
"pew workon invokeai\n",
|
||||
"pip3 install ipykernel\n",
|
||||
"python -m ipykernel install --name=invoke-ai"
|
||||
"python -m ipykernel install --name=invokeai"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -183,7 +183,7 @@
|
||||
"Now:\n",
|
||||
"\n",
|
||||
"1. `cd` to wherever the 'InvokeAI' directory is\n",
|
||||
"1. Run `pew workon invoke-ai`\n",
|
||||
"1. Run `pew workon invokeai`\n",
|
||||
"1. Run `winpty python scripts\\dream.py`"
|
||||
]
|
||||
},
|
||||
|
@ -12,14 +12,15 @@ pillow==9.2.0
|
||||
pudb==2019.2
|
||||
torch==1.12.1
|
||||
torchvision==0.13.0
|
||||
pytorch-lightning==1.4.2
|
||||
pytorch-lightning==1.7.7
|
||||
streamlit==1.12.0
|
||||
test-tube>=0.7.5
|
||||
torch-fidelity==0.3.0
|
||||
torchmetrics==0.6.0
|
||||
transformers==4.19.2
|
||||
transformers==4.21.3
|
||||
-e git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||
-e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
|
||||
-e git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion
|
||||
-e git+https://github.com/TencentARC/GFPGAN.git#egg=gfpgan
|
||||
-e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
|
||||
-e .
|
||||
|
@ -1,6 +1,6 @@
|
||||
-r requirements.txt
|
||||
|
||||
protobuf==3.19.4
|
||||
protobuf==3.19.6
|
||||
torch
|
||||
torchvision
|
||||
-e .
|
||||
|
@ -35,3 +35,4 @@ realesrgan
|
||||
git+https://github.com/openai/CLIP.git@main#egg=clip
|
||||
git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
|
||||
git+https://github.com/TencentARC/GFPGAN.git#egg=gfpgan
|
||||
git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
|
||||
|
@ -95,7 +95,10 @@ def main():
|
||||
"\n* Initialization done! Awaiting your command (-h for help, 'q' to quit)"
|
||||
)
|
||||
|
||||
main_loop(gen, opt, infile)
|
||||
try:
|
||||
main_loop(gen, opt, infile)
|
||||
except KeyboardInterrupt:
|
||||
print("\ngoodbye!")
|
||||
|
||||
# TODO: main_loop() has gotten busy. Needs to be refactored.
|
||||
def main_loop(gen, opt, infile):
|
||||
@ -222,9 +225,13 @@ def main_loop(gen, opt, infile):
|
||||
os.makedirs(opt.outdir)
|
||||
current_outdir = opt.outdir
|
||||
|
||||
# write out the history at this point
|
||||
# Write out the history at this point.
|
||||
# TODO: Fix the parsing of command-line parameters
|
||||
# so that !operations don't need to be stripped and readded
|
||||
if operation == 'postprocess':
|
||||
completer.add_history(f'!fix {command}')
|
||||
elif operation == 'mask':
|
||||
completer.add_history(f'!mask {command}')
|
||||
else:
|
||||
completer.add_history(command)
|
||||
|
||||
@ -244,13 +251,28 @@ def main_loop(gen, opt, infile):
|
||||
# when the -v switch is used to generate variations
|
||||
nonlocal prior_variations
|
||||
nonlocal prefix
|
||||
if use_prefix is not None:
|
||||
prefix = use_prefix
|
||||
|
||||
path = None
|
||||
if opt.grid:
|
||||
grid_images[seed] = image
|
||||
|
||||
elif operation == 'mask':
|
||||
filename = f'{prefix}.{use_prefix}.{seed}.png'
|
||||
tm = opt.text_mask[0]
|
||||
th = opt.text_mask[1] if len(opt.text_mask)>1 else 0.5
|
||||
formatted_dream_prompt = f'!mask {opt.prompt} -tm {tm} {th}'
|
||||
path = file_writer.save_image_and_prompt_to_png(
|
||||
image = image,
|
||||
dream_prompt = formatted_dream_prompt,
|
||||
metadata = {},
|
||||
name = filename,
|
||||
compress_level = opt.png_compression,
|
||||
)
|
||||
results.append([path, formatted_dream_prompt])
|
||||
|
||||
else:
|
||||
if use_prefix is not None:
|
||||
prefix = use_prefix
|
||||
postprocessed = upscaled if upscaled else operation=='postprocess'
|
||||
filename, formatted_dream_prompt = prepare_image_metadata(
|
||||
opt,
|
||||
@ -270,6 +292,7 @@ def main_loop(gen, opt, infile):
|
||||
model_hash = gen.model_hash,
|
||||
),
|
||||
name = filename,
|
||||
compress_level = opt.png_compression,
|
||||
)
|
||||
|
||||
# update rfc metadata
|
||||
@ -288,7 +311,7 @@ def main_loop(gen, opt, infile):
|
||||
results.append([path, formatted_dream_prompt])
|
||||
|
||||
# so that the seed autocompletes (on linux|mac when -S or --seed is specified)
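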
|
||||
if completer:
|
||||
if completer and operation == 'generate':
|
||||
completer.add_seed(seed)
|
||||
completer.add_seed(first_seed)
|
||||
last_results.append([path, seed])
|
||||
@ -306,6 +329,10 @@ def main_loop(gen, opt, infile):
|
||||
print(f'>> fixing {opt.prompt}')
|
||||
opt.last_operation = do_postprocess(gen,opt,image_writer)
|
||||
|
||||
elif operation == 'mask':
|
||||
print(f'>> generating masks from {opt.prompt}')
|
||||
do_textmask(gen, opt, image_writer)
|
||||
|
||||
if opt.grid and len(grid_images) > 0:
|
||||
grid_img = make_grid(list(grid_images.values()))
|
||||
grid_seeds = list(grid_images.keys())
|
||||
@ -352,6 +379,10 @@ def do_command(command:str, gen, opt:Args, completer) -> tuple:
|
||||
command = command.replace('!fix ','',1)
|
||||
operation = 'postprocess'
|
||||
|
||||
elif command.startswith('!mask'):
|
||||
command = command.replace('!mask ','',1)
|
||||
operation = 'mask'
|
||||
|
||||
elif command.startswith('!switch'):
|
||||
model_name = command.replace('!switch ','',1)
|
||||
gen.set_model(model_name)
|
||||
@ -360,6 +391,7 @@ def do_command(command:str, gen, opt:Args, completer) -> tuple:
|
||||
|
||||
elif command.startswith('!models'):
|
||||
gen.model_cache.print_models()
|
||||
completer.add_history(command)
|
||||
operation = None
|
||||
|
||||
elif command.startswith('!import'):
|
||||
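The !switch and !models commands above are the interactive side of the model-switching work in this branch: !models prints the entries known to the model cache, and !switch <model_name> hands the name to gen.set_model(). A session might look like the lines below; only 'stable-diffusion-1.4' appears elsewhere in this diff, and other names depend on your configs/models.yaml:

    !models
    !switch stable-diffusion-1.4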
@ -529,6 +561,19 @@ def write_config_file(conf_path, gen, model_name, new_config, clobber=False, mak
|
||||
gen.set_model(current_model)
|
||||
return True
|
||||
|
||||
def do_textmask(gen, opt, callback):
|
||||
image_path = opt.prompt
|
||||
assert os.path.exists(image_path), f'** "{image_path}" not found. Please enter the name of an existing image file to mask **'
|
||||
assert opt.text_mask is not None and len(opt.text_mask) >= 1, '** Please provide a text mask with -tm **'
|
||||
tm = opt.text_mask[0]
|
||||
threshold = float(opt.text_mask[1]) if len(opt.text_mask) > 1 else 0.5
|
||||
gen.apply_textmask(
|
||||
image_path = image_path,
|
||||
prompt = tm,
|
||||
threshold = threshold,
|
||||
callback = callback,
|
||||
)
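Interactively, this is reached through the new !mask command handled earlier in this file: the prompt is treated as the path of an existing image, and -tm supplies the text prompt plus an optional confidence threshold (0.5 if omitted). An illustrative invocation, with a placeholder image path:

    !mask outputs/img-samples/000010.1234.png -tm 'the dog' 0.6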
|
||||
|
||||
def do_postprocess (gen, opt, callback):
|
||||
file_path = opt.prompt # treat the prompt as the file pathname
|
||||
if os.path.dirname(file_path) == '': #basename given
|
||||
@ -705,7 +750,6 @@ def load_face_restoration(opt):
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
print('>> You may need to install the ESRGAN and/or GFPGAN modules')
|
||||
return gfpgan,codeformer,esrgan
|
||||
|
||||
|
||||
def make_step_callback(gen, opt, prefix):
|
||||
destination = os.path.join(opt.outdir,'intermediates',prefix)
|
||||
|
685
scripts/legacy_api.py
Executable file
@ -0,0 +1,685 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)
|
||||
|
||||
import argparse
|
||||
import shlex
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import copy
|
||||
import warnings
|
||||
import time
|
||||
import ldm.invoke.readline
|
||||
from ldm.invoke.pngwriter import PngWriter, PromptFormatter
|
||||
from ldm.invoke.server_legacy import DreamServer, ThreadingDreamServer
|
||||
from ldm.invoke.image_util import make_grid
|
||||
from omegaconf import OmegaConf
|
||||
|
||||
# Placeholder to be replaced with proper class that tracks the
|
||||
# outputs and associates them with the prompt that generated them.
|
||||
# Just want to get the formatting look right for now.
|
||||
output_cntr = 0
|
||||
|
||||
|
||||
def main():
|
||||
"""Initialize command-line parsers and the diffusion model"""
|
||||
arg_parser = create_argv_parser()
|
||||
opt = arg_parser.parse_args()
|
||||
|
||||
if opt.laion400m:
|
||||
print('--laion400m flag has been deprecated. Please use --model laion400m instead.')
|
||||
sys.exit(-1)
|
||||
if opt.weights != 'model':
|
||||
print('--weights argument has been deprecated. Please configure ./configs/models.yaml, and call it using --model instead.')
|
||||
sys.exit(-1)
|
||||
|
||||
try:
|
||||
models = OmegaConf.load(opt.config)
|
||||
width = models[opt.model].width
|
||||
height = models[opt.model].height
|
||||
config = models[opt.model].config
|
||||
weights = models[opt.model].weights
|
||||
except (FileNotFoundError, IOError, KeyError) as e:
|
||||
print(f'{e}. Aborting.')
|
||||
sys.exit(-1)
|
||||
|
||||
print('* Initializing, be patient...\n')
|
||||
sys.path.append('.')
|
||||
from pytorch_lightning import logging
|
||||
from ldm.generate import Generate
|
||||
|
||||
# these two lines prevent a horrible warning message from appearing
|
||||
# when the frozen CLIP tokenizer is imported
|
||||
import transformers
|
||||
|
||||
transformers.logging.set_verbosity_error()
|
||||
|
||||
# creating a simple text2image object with a handful of
|
||||
# defaults passed on the command line.
|
||||
# additional parameters will be added (or overridden) during
|
||||
# the user input loop
|
||||
t2i = Generate(
|
||||
# width=width,
|
||||
# height=height,
|
||||
sampler_name=opt.sampler_name,
|
||||
weights=weights,
|
||||
full_precision=opt.full_precision,
|
||||
config=config,
|
||||
# grid=opt.grid,
|
||||
# this is solely for recreating the prompt
|
||||
# seamless=opt.seamless,
|
||||
embedding_path=opt.embedding_path,
|
||||
# device_type=opt.device,
|
||||
# ignore_ctrl_c=opt.infile is None,
|
||||
)
|
||||
|
||||
# make sure the output directory exists
|
||||
if not os.path.exists(opt.outdir):
|
||||
os.makedirs(opt.outdir)
|
||||
|
||||
# gets rid of annoying messages about random seed
|
||||
logging.getLogger('pytorch_lightning').setLevel(logging.ERROR)
|
||||
|
||||
# load the infile as a list of lines
|
||||
infile = None
|
||||
if opt.infile:
|
||||
try:
|
||||
if os.path.isfile(opt.infile):
|
||||
infile = open(opt.infile, 'r', encoding='utf-8')
|
||||
elif opt.infile == '-': # stdin
|
||||
infile = sys.stdin
|
||||
else:
|
||||
raise FileNotFoundError(f'{opt.infile} not found.')
|
||||
except (FileNotFoundError, IOError) as e:
|
||||
print(f'{e}. Aborting.')
|
||||
sys.exit(-1)
|
||||
|
||||
if opt.seamless:
|
||||
print(">> changed to seamless tiling mode")
|
||||
|
||||
# preload the model
|
||||
t2i.load_model()
|
||||
|
||||
if not infile:
|
||||
print(
|
||||
"\n* Initialization done! Awaiting your command (-h for help, 'q' to quit)"
|
||||
)
|
||||
|
||||
cmd_parser = create_cmd_parser()
|
||||
if opt.web:
|
||||
dream_server_loop(t2i, opt.host, opt.port, opt.outdir)
|
||||
else:
|
||||
main_loop(t2i, opt.outdir, opt.prompt_as_dir, cmd_parser, infile)
|
||||
|
||||
|
||||
def main_loop(t2i, outdir, prompt_as_dir, parser, infile):
|
||||
"""prompt/read/execute loop"""
|
||||
done = False
|
||||
path_filter = re.compile(r'[<>:"/\\|?*]')
|
||||
last_results = list()
|
||||
|
||||
# os.pathconf is not available on Windows
|
||||
if hasattr(os, 'pathconf'):
|
||||
path_max = os.pathconf(outdir, 'PC_PATH_MAX')
|
||||
name_max = os.pathconf(outdir, 'PC_NAME_MAX')
|
||||
else:
|
||||
path_max = 260
|
||||
name_max = 255
|
||||
|
||||
while not done:
|
||||
try:
|
||||
command = get_next_command(infile)
|
||||
except EOFError:
|
||||
done = True
|
||||
continue
|
||||
except KeyboardInterrupt:
|
||||
done = True
|
||||
continue
|
||||
|
||||
# skip empty lines
|
||||
if not command.strip():
|
||||
continue
|
||||
|
||||
if command.startswith(('#', '//')):
|
||||
continue
|
||||
|
||||
# before splitting, escape single quotes so as not to mess
|
||||
# up the parser
|
||||
command = command.replace("'", "\\'")
|
||||
|
||||
try:
|
||||
elements = shlex.split(command)
|
||||
except ValueError as e:
|
||||
print(str(e))
|
||||
continue
|
||||
|
||||
if elements[0] == 'q':
|
||||
done = True
|
||||
break
|
||||
|
||||
if elements[0].startswith(
|
||||
'!dream'
|
||||
): # in case a stored prompt still contains the !dream command
|
||||
elements.pop(0)
|
||||
|
||||
# rearrange the arguments to mimic how it works in the Dream bot.
|
||||
switches = ['']
|
||||
switches_started = False
|
||||
|
||||
for el in elements:
|
||||
if el[0] == '-' and not switches_started:
|
||||
switches_started = True
|
||||
if switches_started:
|
||||
switches.append(el)
|
||||
else:
|
||||
switches[0] += el
|
||||
switches[0] += ' '
|
||||
switches[0] = switches[0][: len(switches[0]) - 1]
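# Worked example (editorial sketch, not in the original file): the parser's own
# sample command "a fantastic alien landscape -W1024 -H960 -s100 -n12" is split by
# shlex into ['a', 'fantastic', 'alien', 'landscape', '-W1024', '-H960', '-s100', '-n12'];
# the loop above rebuilds that as
#     switches == ['a fantastic alien landscape', '-W1024', '-H960', '-s100', '-n12']
# so parse_args() receives the whole prompt as the single positional 'prompt' argument.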
|
||||
|
||||
try:
|
||||
opt = parser.parse_args(switches)
|
||||
except SystemExit:
|
||||
parser.print_help()
|
||||
continue
|
||||
if len(opt.prompt) == 0:
|
||||
print('Try again with a prompt!')
|
||||
continue
|
||||
# retrieve previous value!
|
||||
if opt.init_img is not None and re.match('^-\\d+$', opt.init_img):
|
||||
try:
|
||||
opt.init_img = last_results[int(opt.init_img)][0]
|
||||
print(f'>> Reusing previous image {opt.init_img}')
|
||||
except IndexError:
|
||||
print(
|
||||
f'>> No previous initial image at position {opt.init_img} found')
|
||||
opt.init_img = None
|
||||
continue
|
||||
|
||||
if opt.seed is not None and opt.seed < 0: # retrieve previous value!
|
||||
try:
|
||||
opt.seed = last_results[opt.seed][1]
|
||||
print(f'>> Reusing previous seed {opt.seed}')
|
||||
except IndexError:
|
||||
print(f'>> No previous seed at position {opt.seed} found')
|
||||
opt.seed = None
|
||||
continue
|
||||
|
||||
do_grid = opt.grid or t2i.grid
|
||||
|
||||
if opt.with_variations is not None:
|
||||
# shotgun parsing, woo
|
||||
parts = []
|
||||
broken = False # python doesn't have labeled loops...
|
||||
for part in opt.with_variations.split(','):
|
||||
seed_and_weight = part.split(':')
|
||||
if len(seed_and_weight) != 2:
|
||||
print(f'could not parse with_variation part "{part}"')
|
||||
broken = True
|
||||
break
|
||||
try:
|
||||
seed = int(seed_and_weight[0])
|
||||
weight = float(seed_and_weight[1])
|
||||
except ValueError:
|
||||
print(f'could not parse with_variation part "{part}"')
|
||||
broken = True
|
||||
break
|
||||
parts.append([seed, weight])
|
||||
if broken:
|
||||
continue
|
||||
if len(parts) > 0:
|
||||
opt.with_variations = parts
|
||||
else:
|
||||
opt.with_variations = None
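# Worked example (editorial sketch, not in the original file): passing
# -V 1234:0.4,5678:0.2 arrives here as the string '1234:0.4,5678:0.2' and is
# parsed into opt.with_variations == [[1234, 0.4], [5678, 0.2]]; a part that is
# not of the form seed:weight (say '1234' or '1234:x') prints a message and skips the command.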
|
||||
|
||||
if opt.outdir:
|
||||
if not os.path.exists(opt.outdir):
|
||||
os.makedirs(opt.outdir)
|
||||
current_outdir = opt.outdir
|
||||
elif prompt_as_dir:
|
||||
# sanitize the prompt to a valid folder name
|
||||
subdir = path_filter.sub('_', opt.prompt)[:name_max].rstrip(' .')
|
||||
|
||||
# truncate path to maximum allowed length
|
||||
# 27 is the length of '######.##########.##.png', plus two separators and a NUL
|
||||
subdir = subdir[:(path_max - 27 - len(os.path.abspath(outdir)))]
|
||||
current_outdir = os.path.join(outdir, subdir)
|
||||
|
||||
print('Writing files to directory: "' + current_outdir + '"')
|
||||
|
||||
# make sure the output directory exists
|
||||
if not os.path.exists(current_outdir):
|
||||
os.makedirs(current_outdir)
|
||||
else:
|
||||
current_outdir = outdir
|
||||
|
||||
# Here is where the images are actually generated!
|
||||
last_results = []
|
||||
try:
|
||||
file_writer = PngWriter(current_outdir)
|
||||
prefix = file_writer.unique_prefix()
|
||||
results = [] # list of filename, prompt pairs
|
||||
grid_images = dict() # seed -> Image, only used if `do_grid`
|
||||
|
||||
def image_writer(image, seed, upscaled=False):
|
||||
path = None
|
||||
if do_grid:
|
||||
grid_images[seed] = image
|
||||
else:
|
||||
if upscaled and opt.save_original:
|
||||
filename = f'{prefix}.{seed}.postprocessed.png'
|
||||
else:
|
||||
filename = f'{prefix}.{seed}.png'
|
||||
if opt.variation_amount > 0:
|
||||
iter_opt = argparse.Namespace(**vars(opt)) # copy
|
||||
this_variation = [[seed, opt.variation_amount]]
|
||||
if opt.with_variations is None:
|
||||
iter_opt.with_variations = this_variation
|
||||
else:
|
||||
iter_opt.with_variations = opt.with_variations + this_variation
|
||||
iter_opt.variation_amount = 0
|
||||
normalized_prompt = PromptFormatter(
|
||||
t2i, iter_opt).normalize_prompt()
|
||||
metadata_prompt = f'{normalized_prompt} -S{iter_opt.seed}'
|
||||
elif opt.with_variations is not None:
|
||||
normalized_prompt = PromptFormatter(
|
||||
t2i, opt).normalize_prompt()
|
||||
# use the original seed - the per-iteration value is the last variation-seed
|
||||
metadata_prompt = f'{normalized_prompt} -S{opt.seed}'
|
||||
else:
|
||||
normalized_prompt = PromptFormatter(
|
||||
t2i, opt).normalize_prompt()
|
||||
metadata_prompt = f'{normalized_prompt} -S{seed}'
|
||||
path = file_writer.save_image_and_prompt_to_png(
|
||||
image, metadata_prompt, filename)
|
||||
if (not upscaled) or opt.save_original:
|
||||
# only append to results if we didn't overwrite an earlier output
|
||||
results.append([path, metadata_prompt])
|
||||
last_results.append([path, seed])
|
||||
|
||||
t2i.prompt2image(image_callback=image_writer, **vars(opt))
|
||||
|
||||
if do_grid and len(grid_images) > 0:
|
||||
grid_img = make_grid(list(grid_images.values()))
|
||||
grid_seeds = list(grid_images.keys())
|
||||
first_seed = last_results[0][1]
|
||||
filename = f'{prefix}.{first_seed}.png'
|
||||
# TODO better metadata for grid images
|
||||
normalized_prompt = PromptFormatter(
|
||||
t2i, opt).normalize_prompt()
|
||||
metadata_prompt = f'{normalized_prompt} -S{first_seed} --grid -n{len(grid_images)} # {grid_seeds}'
|
||||
path = file_writer.save_image_and_prompt_to_png(
|
||||
grid_img, metadata_prompt, filename
|
||||
)
|
||||
results = [[path, metadata_prompt]]
|
||||
|
||||
except AssertionError as e:
|
||||
print(e)
|
||||
continue
|
||||
|
||||
except OSError as e:
|
||||
print(e)
|
||||
continue
|
||||
|
||||
print('Outputs:')
|
||||
log_path = os.path.join(current_outdir, 'dream_log.txt')
|
||||
write_log_message(results, log_path)
|
||||
print()
|
||||
|
||||
print('goodbye!')
|
||||
|
||||
|
||||
def get_next_command(infile=None) -> str: # command string
|
||||
if infile is None:
|
||||
command = input('dream> ')
|
||||
else:
|
||||
command = infile.readline()
|
||||
if not command:
|
||||
raise EOFError
|
||||
else:
|
||||
command = command.strip()
|
||||
print(f'#{command}')
|
||||
return command
|
||||
|
||||
|
||||
def dream_server_loop(t2i, host, port, outdir):
|
||||
print('\n* --web was specified, starting web server...')
|
||||
# Change working directory to the stable-diffusion directory
|
||||
os.chdir(
|
||||
os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
|
||||
)
|
||||
|
||||
# Start server
|
||||
DreamServer.model = t2i
|
||||
DreamServer.outdir = outdir
|
||||
dream_server = ThreadingDreamServer((host, port))
|
||||
print(">> Started Stable Diffusion dream server!")
|
||||
if host == '0.0.0.0':
|
||||
print(
|
||||
f"Point your browser at http://localhost:{port} or use the host's DNS name or IP address.")
|
||||
else:
|
||||
print(">> Default host address now 127.0.0.1 (localhost). Use --host 0.0.0.0 to bind any address.")
|
||||
print(f">> Point your browser at http://{host}:{port}.")
|
||||
|
||||
try:
|
||||
dream_server.serve_forever()
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
dream_server.server_close()
|
||||
|
||||
|
||||
def write_log_message(results, log_path):
|
||||
"""logs the name of the output image, prompt, and prompt args to the terminal and log file"""
|
||||
global output_cntr
|
||||
log_lines = [f'{path}: {prompt}\n' for path, prompt in results]
|
||||
for l in log_lines:
|
||||
output_cntr += 1
|
||||
print(f'[{output_cntr}] {l}',end='')
|
||||
|
||||
|
||||
with open(log_path, 'a', encoding='utf-8') as file:
|
||||
file.writelines(log_lines)
|
||||
|
||||
|
||||
SAMPLER_CHOICES = [
|
||||
'ddim',
|
||||
'k_dpm_2_a',
|
||||
'k_dpm_2',
|
||||
'k_euler_a',
|
||||
'k_euler',
|
||||
'k_heun',
|
||||
'k_lms',
|
||||
'plms',
|
||||
]
|
||||
|
||||
|
||||
def create_argv_parser():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="""Generate images using Stable Diffusion.
|
||||
Use --web to launch the web interface.
|
||||
Use --from_file to load prompts from a file path or standard input ("-").
|
||||
Otherwise you will be dropped into an interactive command prompt (type -h for help.)
|
||||
Other command-line arguments are defaults that can usually be overridden
|
||||
at the command prompt.
|
||||
"""
|
||||
)
|
||||
parser.add_argument(
|
||||
'--laion400m',
|
||||
'--latent_diffusion',
|
||||
'-l',
|
||||
dest='laion400m',
|
||||
action='store_true',
|
||||
help='Fallback to the latent diffusion (laion400m) weights and config',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--from_file',
|
||||
dest='infile',
|
||||
type=str,
|
||||
help='If specified, load prompts from this file',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-n',
|
||||
'--iterations',
|
||||
type=int,
|
||||
default=1,
|
||||
help='Number of images to generate',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-F',
|
||||
'--full_precision',
|
||||
dest='full_precision',
|
||||
action='store_true',
|
||||
help='Use more memory-intensive full precision math for calculations',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-g',
|
||||
'--grid',
|
||||
action='store_true',
|
||||
help='Generate a grid instead of individual images',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-A',
|
||||
'-m',
|
||||
'--sampler',
|
||||
dest='sampler_name',
|
||||
choices=SAMPLER_CHOICES,
|
||||
metavar='SAMPLER_NAME',
|
||||
default='k_lms',
|
||||
help=f'Set the initial sampler. Default: k_lms. Supported samplers: {", ".join(SAMPLER_CHOICES)}',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--outdir',
|
||||
'-o',
|
||||
type=str,
|
||||
default='outputs/img-samples',
|
||||
help='Directory to save generated images and a log of prompts and seeds. Default: outputs/img-samples',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--seamless',
|
||||
action='store_true',
|
||||
help='Change the model to seamless tiling (circular) mode',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--embedding_path',
|
||||
type=str,
|
||||
help='Path to a pre-trained embedding manager checkpoint - can only be set on command line',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--prompt_as_dir',
|
||||
'-p',
|
||||
action='store_true',
|
||||
help='Place images in subdirectories named after the prompt.',
|
||||
)
|
||||
# GFPGAN related args
|
||||
parser.add_argument(
|
||||
'--gfpgan_bg_upsampler',
|
||||
type=str,
|
||||
default='realesrgan',
|
||||
help='Background upsampler. Default: realesrgan. Options: realesrgan, none.',
|
||||
|
||||
)
|
||||
parser.add_argument(
|
||||
'--gfpgan_bg_tile',
|
||||
type=int,
|
||||
default=400,
|
||||
help='Tile size for background sampler, 0 for no tile during testing. Default: 400.',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--gfpgan_model_path',
|
||||
type=str,
|
||||
default='experiments/pretrained_models/GFPGANv1.3.pth',
|
||||
help='Indicates the path to the GFPGAN model, relative to --gfpgan_dir.',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--gfpgan_dir',
|
||||
type=str,
|
||||
default='./src/gfpgan',
|
||||
help='Indicates the directory containing the GFPGAN code.',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--web',
|
||||
dest='web',
|
||||
action='store_true',
|
||||
help='Start in web server mode.',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--host',
|
||||
type=str,
|
||||
default='127.0.0.1',
|
||||
help='Web server: Host or IP to listen on. Set to 0.0.0.0 to accept traffic from other devices on your network.'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--port',
|
||||
type=int,
|
||||
default='9090',
|
||||
help='Web server: Port to listen on'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--weights',
|
||||
default='model',
|
||||
help='Indicates the Stable Diffusion model to use.',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--device',
|
||||
'-d',
|
||||
type=str,
|
||||
default='cuda',
|
||||
help="device to run stable diffusion on. defaults to cuda `torch.cuda.current_device()` if available"
|
||||
)
|
||||
parser.add_argument(
|
||||
'--model',
|
||||
default='stable-diffusion-1.4',
|
||||
help='Indicates which diffusion model to load. (currently "stable-diffusion-1.4" (default) or "laion400m")',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--config',
|
||||
default='configs/models.yaml',
|
||||
help='Path to configuration file for alternate models.',
|
||||
)
|
||||
return parser
|
||||
|
||||
|
||||
def create_cmd_parser():
|
||||
parser = argparse.ArgumentParser(
|
||||
description='Example: dream> a fantastic alien landscape -W1024 -H960 -s100 -n12'
|
||||
)
|
||||
parser.add_argument('prompt')
|
||||
parser.add_argument('-s', '--steps', type=int, help='Number of steps')
|
||||
parser.add_argument(
|
||||
'-S',
|
||||
'--seed',
|
||||
type=int,
|
||||
help='Image seed; a +ve integer, or use -1 for the previous seed, -2 for the one before that, etc',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-n',
|
||||
'--iterations',
|
||||
type=int,
|
||||
default=1,
|
||||
help='Number of samplings to perform (slower, but will provide seeds for individual images)',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-W', '--width', type=int, help='Image width, multiple of 64'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-H', '--height', type=int, help='Image height, multiple of 64'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-C',
|
||||
'--cfg_scale',
|
||||
default=7.5,
|
||||
type=float,
|
||||
help='Classifier free guidance (CFG) scale - higher numbers cause generator to "try" harder.',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-g', '--grid', action='store_true', help='generate a grid'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--outdir',
|
||||
'-o',
|
||||
type=str,
|
||||
default=None,
|
||||
help='Directory to save generated images and a log of prompts and seeds',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--seamless',
|
||||
action='store_true',
|
||||
help='Change the model to seamless tiling (circular) mode',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-i',
|
||||
'--individual',
|
||||
action='store_true',
|
||||
help='Generate individual files (default)',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-I',
|
||||
'--init_img',
|
||||
type=str,
|
||||
help='Path to input image for img2img mode (supersedes width and height)',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-M',
|
||||
'--init_mask',
|
||||
type=str,
|
||||
help='Path to input mask for inpainting mode (supersedes width and height)',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-T',
|
||||
'-fit',
|
||||
'--fit',
|
||||
action='store_true',
|
||||
help='If specified, will resize the input image to fit within the dimensions of width x height (512x512 default)',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-f',
|
||||
'--strength',
|
||||
default=0.75,
|
||||
type=float,
|
||||
help='Strength for noising/unnoising. 0.0 preserves image exactly, 1.0 replaces it completely',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-G',
|
||||
'--gfpgan_strength',
|
||||
default=0,
|
||||
type=float,
|
||||
help='The strength at which to apply the GFPGAN model to the result, in order to improve faces.',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-U',
|
||||
'--upscale',
|
||||
nargs='+',
|
||||
default=None,
|
||||
type=float,
|
||||
help='Scale factor (2, 4) for upscaling followed by upscaling strength (0-1.0). If strength not specified, defaults to 0.75'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-save_orig',
|
||||
'--save_original',
|
||||
action='store_true',
|
||||
help='Save original. Use it when upscaling to save both versions.',
|
||||
)
|
||||
# variants is going to be superseded by a generalized "prompt-morph" function
|
||||
# parser.add_argument('-v','--variants',type=int,help="in img2img mode, the first generated image will get passed back to img2img to generate the requested number of variants")
|
||||
parser.add_argument(
|
||||
'-x',
|
||||
'--skip_normalize',
|
||||
action='store_true',
|
||||
help='Skip subprompt weight normalization',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-A',
|
||||
'-m',
|
||||
'--sampler',
|
||||
dest='sampler_name',
|
||||
default=None,
|
||||
type=str,
|
||||
choices=SAMPLER_CHOICES,
|
||||
metavar='SAMPLER_NAME',
|
||||
help=f'Switch to a different sampler. Supported samplers: {", ".join(SAMPLER_CHOICES)}',
|
||||
)
|
||||
parser.add_argument(
|
||||
'-t',
|
||||
'--log_tokenization',
|
||||
action='store_true',
|
||||
help='shows how the prompt is split into tokens'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-v',
|
||||
'--variation_amount',
|
||||
default=0.0,
|
||||
type=float,
|
||||
help='If > 0, generates variations on the initial seed instead of random seeds per iteration. Must be between 0 and 1. Higher values will be more different.'
|
||||
)
|
||||
parser.add_argument(
|
||||
'-V',
|
||||
'--with_variations',
|
||||
default=None,
|
||||
type=str,
|
||||
help='list of variations to apply, in the format `seed:weight,seed:weight,...`'
|
||||
)
|
||||
return parser
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
@ -10,28 +10,31 @@ import sys
|
||||
import transformers
|
||||
import os
|
||||
import warnings
|
||||
import torch
|
||||
import urllib.request
|
||||
import zipfile
|
||||
import traceback
|
||||
|
||||
transformers.logging.set_verbosity_error()
|
||||
|
||||
# this will preload the Bert tokenizer files
|
||||
print('preloading bert tokenizer...', end='')
|
||||
|
||||
tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
|
||||
print('Loading bert tokenizer (ignore deprecation errors)...', end='')
|
||||
with warnings.catch_warnings():
|
||||
warnings.filterwarnings('ignore', category=DeprecationWarning)
|
||||
tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
|
||||
print('...success')
|
||||
sys.stdout.flush()
|
||||
|
||||
# this will download requirements for Kornia
|
||||
print('preloading Kornia requirements...', end='')
|
||||
print('Loading Kornia requirements...', end='')
|
||||
with warnings.catch_warnings():
|
||||
warnings.filterwarnings('ignore', category=DeprecationWarning)
|
||||
import kornia
|
||||
print('...success')
|
||||
|
||||
version = 'openai/clip-vit-large-patch14'
|
||||
|
||||
print('preloading CLIP model...',end='')
|
||||
sys.stdout.flush()
|
||||
|
||||
print('Loading CLIP model...',end='')
|
||||
tokenizer = CLIPTokenizer.from_pretrained(version)
|
||||
transformer = CLIPTextModel.from_pretrained(version)
|
||||
print('...success')
|
||||
@ -61,7 +64,6 @@ if gfpgan:
|
||||
FaceRestoreHelper(1, det_model='retinaface_resnet50')
|
||||
print('...success')
|
||||
except Exception:
|
||||
import traceback
|
||||
print('Error loading ESRGAN:')
|
||||
print(traceback.format_exc())
|
||||
|
||||
@ -89,13 +91,11 @@ if gfpgan:
|
||||
urllib.request.urlretrieve(model_url,model_dest)
|
||||
print('...success')
|
||||
except Exception:
|
||||
import traceback
|
||||
print('Error loading GFPGAN:')
|
||||
print(traceback.format_exc())
|
||||
|
||||
print('preloading CodeFormer model file...',end='')
|
||||
try:
|
||||
import urllib.request
|
||||
model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth'
|
||||
model_dest = 'ldm/invoke/restoration/codeformer/weights/codeformer.pth'
|
||||
if not os.path.exists(model_dest):
|
||||
@ -103,7 +103,35 @@ try:
|
||||
os.makedirs(os.path.dirname(model_dest), exist_ok=True)
|
||||
urllib.request.urlretrieve(model_url,model_dest)
|
||||
except Exception:
|
||||
import traceback
|
||||
print('Error loading CodeFormer:')
|
||||
print(traceback.format_exc())
|
||||
print('...success')
|
||||
|
||||
print('Loading clipseg model for text-based masking...',end='')
|
||||
try:
|
||||
model_url = 'https://owncloud.gwdg.de/index.php/s/ioHbRzFx6th32hn/download'
|
||||
model_dest = 'src/clipseg/clipseg_weights.zip'
|
||||
weights_dir = 'src/clipseg/weights'
|
||||
if not os.path.exists(weights_dir):
|
||||
os.makedirs(os.path.dirname(model_dest), exist_ok=True)
|
||||
urllib.request.urlretrieve(model_url,model_dest)
|
||||
with zipfile.ZipFile(model_dest,'r') as zip:
|
||||
zip.extractall('src/clipseg')
|
||||
os.rename('src/clipseg/clipseg_weights','src/clipseg/weights')
|
||||
os.remove(model_dest)
|
||||
from clipseg_models.clipseg import CLIPDensePredT
|
||||
model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64, )
|
||||
model.eval()
|
||||
model.load_state_dict(
|
||||
torch.load(
|
||||
'src/clipseg/weights/rd64-uni-refined.pth',
|
||||
map_location=torch.device('cpu')
|
||||
),
|
||||
strict=False,
|
||||
)
|
||||
except Exception:
|
||||
print('Error installing clipseg model:')
|
||||
print(traceback.format_exc())
|
||||
print('...success')
|
||||
|
||||
|
||||
|
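One detail worth noting from the two clipseg-related additions: the preload step above verifies the download by loading rd64-uni-refined.pth, while ldm/invoke/txt2mask.py reads src/clipseg/weights/rd64-uni.pth (its CLIPSEG_WEIGHTS constant). A quick post-install sanity check, sketched here rather than taken from the diff, would be:

    import os

    # the file Txt2Mask loads at inference time (CLIPSEG_WEIGHTS in ldm/invoke/txt2mask.py)
    assert os.path.exists('src/clipseg/weights/rd64-uni.pth'), \
        'clipseg weights missing - re-run the preload script'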
2
setup.py
@ -2,7 +2,7 @@ from setuptools import setup, find_packages
|
||||
|
||||
setup(
|
||||
name='invoke-ai',
|
||||
version='2.0.0',
|
||||
version='2.0.2',
|
||||
description='',
|
||||
packages=find_packages(),
|
||||
install_requires=[
|
||||
|
46
tests/legacy_tests.sh
Executable file
@ -0,0 +1,46 @@
|
||||
#! /usr/bin/env bash
|
||||
|
||||
# This file contains a bunch of compatibility tests that ensure
|
||||
# that the API interface of `scripts/legacy_api.py` remains stable
|
||||
|
||||
set -e
|
||||
|
||||
OUTDIR=$(mktemp -d)
|
||||
|
||||
echo "Using directory $OUTDIR"
|
||||
|
||||
# Start API
|
||||
python -u scripts/legacy_api.py --web --host=localhost --port=3333 --outdir=$OUTDIR &> $OUTDIR/sd.log &
|
||||
APP_PID=$!
|
||||
|
||||
echo "Wait for server to startup"
|
||||
|
||||
tail -f -n0 $OUTDIR/sd.log | grep -qe "Point your browser at"
|
||||
|
||||
echo "Started, continuing"
|
||||
|
||||
if [ $? == 1 ]; then
|
||||
echo "Search terminated without finding the pattern"
|
||||
fi
|
||||
|
||||
# Generate image
|
||||
RESULT=$(curl -v -X POST -d '{"index":0,"variation_amount":0,"with_variations":"","steps":25,"width":512,"seed":"1337","prompt":"A cat wearing a hat","strength":0.5,"initimg":null,"cfg_scale":2,"iterations":1,"upscale_level":0,"upscale_strength":0,"sampler_name":"k_euler","height":512}' localhost:3333 | grep result)
|
||||
|
||||
# Test 01 - Image contents
|
||||
FILENAME=$(echo $RESULT | jq -r .url)
|
||||
|
||||
ACTUAL_CHECKSUM=$(sha256sum $FILENAME)
|
||||
EXPECTED_CHECKSUM="a77799226a4dfc62a1674498e575c775da042959a4b90b13e26f666c302f079f"
|
||||
|
||||
if [ "$ACTUAL_CHECKSUM" != "$EXPECTED_CHECKSUM" ]; then
|
||||
echo "Expected hash $EXPECTED_CHECKSUM but got hash $ACTUAL_CHECKSUM"
|
||||
kill $APP_PID
|
||||
# rm -r $OUTDIR
|
||||
exit 33
|
||||
fi
|
||||
|
||||
# Assert output
|
||||
|
||||
# Cleanup
|
||||
kill $APP_PID
|
||||
# rm -r $OUTDIR
|