mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00

Merge branch 'development' into development
commit be99d5a4bd
@@ -2,6 +2,15 @@

# InvokeAI: A Stable Diffusion Toolkit

_Note: This fork is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to
report bugs and make feature requests. Be sure to use the provided
templates. They will help diagnose issues faster._

_This repository was formerly known as lstein/stable-diffusion_

# **Table of Contents**

[![discord badge]][discord link]
@@ -103,6 +103,8 @@ socketio = SocketIO(
    engineio_logger=engineio_logger,
    max_http_buffer_size=max_http_buffer_size,
    cors_allowed_origins=cors_allowed_origins,
    ping_interval=(50, 50),
    ping_timeout=60,
)
@@ -186,17 +188,50 @@ def handle_request_capabilities():
    socketio.emit("systemConfig", config)


@socketio.on("requestAllImages")
def handle_request_all_images():
    print(f">> All images requested")
    paths = list(filter(os.path.isfile, glob.glob(result_path + "*.png")))
    paths.sort(key=lambda x: os.path.getmtime(x))
@socketio.on("requestImages")
def handle_request_images(page=1, offset=0, last_mtime=None):
    chunk_size = 50

    if last_mtime:
        print(f">> Latest images requested")
    else:
        print(
            f">> Page {page} of images requested (page size {chunk_size} offset {offset})"
        )

    paths = glob.glob(os.path.join(result_path, "*.png"))
    sorted_paths = sorted(paths, key=lambda x: os.path.getmtime(x), reverse=True)

    if last_mtime:
        image_paths = filter(lambda x: os.path.getmtime(x) > last_mtime, sorted_paths)
    else:
        image_paths = sorted_paths[
            slice(chunk_size * (page - 1) + offset, chunk_size * page + offset)
        ]
        page = page + 1

    image_array = []
    for path in paths:
    for path in image_paths:
        metadata = retrieve_metadata(path)
        image_array.append({"url": path, "metadata": metadata["sd-metadata"]})
        socketio.emit("galleryImages", {"images": image_array})
        eventlet.sleep(0)
        image_array.append(
            {
                "url": path,
                "mtime": os.path.getmtime(path),
                "metadata": metadata["sd-metadata"],
            }
        )

    socketio.emit(
        "galleryImages",
        {
            "images": image_array,
            "nextPage": page,
            "offset": offset,
            "onlyNewImages": True if last_mtime else False,
        },
    )
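For orientation, here is a hedged, standalone sketch of the paging arithmetic the new handler uses (stand-in data; this is an illustration, not the handler itself):

```python
# Illustrative sketch of the pagination in handle_request_images: page N,
# shifted by a running offset, selects a 50-item window from a list that
# is already sorted newest-first.
chunk_size = 50

def page_slice(page: int, offset: int) -> slice:
    # The offset skips images prepended to the gallery since the last
    # request, so page boundaries stay stable while new results arrive.
    return slice(chunk_size * (page - 1) + offset, chunk_size * page + offset)

sorted_paths = [f"image_{i:03}.png" for i in range(120)]  # stand-in data
print(sorted_paths[page_slice(1, 0)][:3])  # page 1 starts at index 0
print(sorted_paths[page_slice(2, 5)][:3])  # page 2, shifted by offset 5
```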
@socketio.on("generateImage")
@@ -275,6 +310,7 @@ def handle_run_esrgan_event(original_image, esrgan_parameters):
        "esrganResult",
        {
            "url": os.path.relpath(path),
            "mtime": os.path.getmtime(path),
            "metadata": metadata,
        },
    )
@@ -343,6 +379,7 @@ def handle_run_gfpgan_event(original_image, gfpgan_parameters):
        "gfpganResult",
        {
            "url": os.path.relpath(path),
            "mtime": os.path.getmtime(path),
            "metadata": metadata,
        },
    )
@@ -642,7 +679,11 @@ def generate_images(generation_parameters, esrgan_parameters, gfpgan_parameters)
    step_index += 1
    socketio.emit(
        "intermediateResult",
        {"url": os.path.relpath(path), "metadata": generation_parameters},
        {
            "url": os.path.relpath(path),
            "mtime": os.path.getmtime(path),
            "metadata": generation_parameters,
        },
    )
    socketio.emit("progressUpdate", progress)
    eventlet.sleep(0)
@@ -670,6 +711,11 @@ def generate_images(generation_parameters, esrgan_parameters, gfpgan_parameters)
        first_seed = first_seed or seed
        this_variation = [[seed, all_parameters["variation_amount"]]]
        all_parameters["with_variations"] = prior_variations + this_variation
        all_parameters["seed"] = first_seed
    elif "with_variations" in all_parameters:
        all_parameters["seed"] = first_seed
    else:
        all_parameters["seed"] = seed

    if esrgan_parameters:
        progress["currentStatus"] = "Upscaling"
@@ -702,7 +748,6 @@ def generate_images(generation_parameters, esrgan_parameters, gfpgan_parameters)
        postprocessing = True
        all_parameters["gfpgan_strength"] = gfpgan_parameters["strength"]

    all_parameters["seed"] = first_seed
    progress["currentStatus"] = "Saving image"
    socketio.emit("progressUpdate", progress)
    eventlet.sleep(0)
@@ -735,7 +780,11 @@ def generate_images(generation_parameters, esrgan_parameters, gfpgan_parameters)

    socketio.emit(
        "generationResult",
        {"url": os.path.relpath(path), "metadata": metadata},
        {
            "url": os.path.relpath(path),
            "mtime": os.path.getmtime(path),
            "metadata": metadata,
        },
    )
    eventlet.sleep(0)
@@ -103,157 +103,141 @@ overridden on a per-prompt basis (see [List of prompt arguments]

These arguments are deprecated but still work:

<figure markdown>

| Argument           | Shortcut | Default | Description                                                       |
| ------------------ | -------- | ------- | ----------------------------------------------------------------- |
| `--weights <path>` |          | `None`  | Path to weights file; use `--model stable-diffusion-1.4` instead  |
| `--laion400m`      | `-l`     | `False` | Use older LAION400m weights; use `--model=laion400m` instead      |
| Argument | Shortcut | Default | Description |
|--------------------|------------|---------------------|--------------|
| --weights <path> | | None | Path to weights file; use `--model stable-diffusion-1.4` instead |
| --laion400m | -l | False | Use older LAION400m weights; use `--model=laion400m` instead |

</figure>

**A note on path names:** On Windows systems, you may run into
problems when passing the dream script standard backslashed path
names because the Python interpreter treats "\" as an escape.
You can either double your slashes (ick): C:\\path\\to\\my\\file, or
use Linux/Mac style forward slashes (better): C:/path/to/my/file.

!!! note

## List of prompt arguments

    On Windows systems, you may run into problems when passing the dream script standard backslashed
    path names because the Python interpreter treats `\` as an escape. You can either double your
    slashes (ick): `C:\\path\\to\\my\\file`, or use Linux/Mac style forward slashes (better):
    `C:/path/to/my/file`.

After the dream.py script initializes, it will present you with a
**dream>** prompt. Here you can enter information to generate images
from text (txt2img), to embellish an existing image or sketch
(img2img), or to selectively alter chosen regions of the image
(inpainting).

### List of prompt arguments
### This is an example of txt2img:

After the `dream.py` script initializes, it will present you with a **`dream>`**
prompt. Here you can enter information to generate images from text (txt2img),
to embellish an existing image or sketch (img2img), or to selectively alter
chosen regions of the image (inpainting).

~~~~
dream> waterfall and rainbow -W640 -H480
~~~~

#### txt2img

This will create the requested image with the dimensions 640 (width)
and 480 (height).

!!! example

    Here are the dream> commands that apply to txt2img:

    ```bash
    dream> "waterfall and rainbow" -W640 -H480
    ```

| Argument | Shortcut | Default | Description |
|--------------------|------------|---------------------|--------------|
| "my prompt" | | | Text prompt to use. The quotation marks are optional. |
| --width <int> | -W<int> | 512 | Width of generated image |
| --height <int> | -H<int> | 512 | Height of generated image |
| --iterations <int> | -n<int> | 1 | How many images to generate from this prompt |
| --steps <int> | -s<int> | 50 | How many steps of refinement to apply |
| --cfg_scale <float>| -C<float> | 7.5 | How hard to try to match the prompt to the generated image; any number greater than 1.0 works, but the useful range is roughly 5.0 to 20.0 |
| --seed <int> | -S<int> | None | Set the random seed for the next series of images. This can be used to recreate an image generated previously. |
| --sampler <sampler>| -A<sampler>| k_lms | Sampler to use. Use -h to get a list of available samplers. |
| --grid | -g | False | Turn on grid mode to return a single image combining all the images generated by this prompt |
| --individual | -i | True | Turn off grid mode (deprecated; leave off --grid instead) |
| --outdir <path> | -o<path> | outputs/img_samples | Temporarily change the location of these images |
| --seamless | | False | Activate seamless tiling for interesting effects |
| --log_tokenization | -t | False | Display a color-coded list of the parsed tokens derived from the prompt |
| --skip_normalization| -x | False | Weighted subprompts will not be normalized. See [Weighted Prompts](./OTHER.md#weighted-prompts) |
| --upscale <int> <float> | -U <int> <float> | -U 1 0.75 | Upscale image by magnification factor (2, 4), and set strength of upscaling (0.0-1.0). If strength is not set, it will default to 0.75. |
| --gfpgan_strength <float> | -G <float> | -G0 | Fix faces using the GFPGAN algorithm; argument indicates how hard the algorithm should try (0.0-1.0) |
| --save_original | -save_orig | False | When upscaling or fixing faces, this will cause the original image to be saved rather than replaced. |
| --variation <float> | -v<float> | 0.0 | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with -S<seed> and -n<int> to generate a series of riffs on a starting image. See [Variations](./VARIATIONS.md). |
| --with_variations <pattern> | -V<pattern> | None | Combine two or more variations. See [Variations](./VARIATIONS.md) for how to use this. |

This will create the requested image with the dimensions 640 (width) and 480 (height).

Note that the width and height of the image must be multiples of
64. You can provide different values, but they will be rounded down to
the nearest multiple of 64.
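To make the rounding concrete, here is a small illustrative Python snippet (not part of the application itself):

```python
# Illustrative only: requested dimensions snap down to the nearest
# multiple of 64 before generation.
def snap_to_64(value: int) -> int:
    return (value // 64) * 64

print(snap_to_64(640))  # 640 (already a multiple of 64)
print(snap_to_64(500))  # 448
```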
Those are the `dream` commands that apply to txt2img:

| Argument <img width="680" align="right"/> | Shortcut <img width="420" align="right"/> | Default <img width="480" align="right"/> | Description |
| ----------------------------------------- | ----------------------------------------- | ---------------------------------------- | ----------- |
| `"my prompt"` | | | Text prompt to use. The quotation marks are optional. |
| `--width <int>` | `-W<int>` | `512` | Width of generated image |
| `--height <int>` | `-H<int>` | `512` | Height of generated image |
| `--iterations <int>` | `-n<int>` | `1` | How many images to generate from this prompt |
| `--steps <int>` | `-s<int>` | `50` | How many steps of refinement to apply |
| `--cfg_scale <float>` | `-C<float>` | `7.5` | How hard to try to match the prompt to the generated image; any number greater than 0.0 works, but the useful range is roughly 5.0 to 20.0 |
| `--seed <int>` | `-S<int>` | `None` | Set the random seed for the next series of images. This can be used to recreate an image generated previously. |
| `--sampler <sampler>` | `-A<sampler>` | `k_lms` | Sampler to use. Use `-h` to get a list of available samplers. |
| `--grid` | `-g` | `False` | Turn on grid mode to return a single image combining all the images generated by this prompt |
| `--individual` | `-i` | `True` | Turn off grid mode (deprecated; leave off `--grid` instead) |
| `--outdir <path>` | `-o<path>` | `outputs/img_samples` | Temporarily change the location of these images |
| `--seamless` | | `False` | Activate seamless tiling for interesting effects |
| `--log_tokenization` | `-t` | `False` | Display a color-coded list of the parsed tokens derived from the prompt |
| `--skip_normalization` | `-x` | `False` | Weighted subprompts will not be normalized. See [Weighted Prompts](./OTHER.md#weighted-prompts) |
| `--upscale <int> <float>` | `-U <int> <float>` | `-U 1 0.75` | Upscale image by magnification factor (2, 4), and set strength of upscaling (0.0-1.0). If strength is not set, it will default to 0.75. |
| `--gfpgan_strength <float>` | `-G <float>` | `-G0` | Fix faces using the GFPGAN algorithm; argument indicates how hard the algorithm should try (0.0-1.0) |
| `--save_original` | `-save_orig` | `False` | When upscaling or fixing faces, this will cause the original image to be saved rather than replaced. |
| `--variation <float>` | `-v<float>` | `0.0` | Add a bit of noise (0.0=none, 1.0=high) to the image in order to generate a series of variations. Usually used in combination with `-S<seed>` and `-n<int>` to generate a series of riffs on a starting image. See [Variations](./VARIATIONS.md). |
| `--with_variations <pattern>` | `-V<pattern>` | `None` | Combine two or more variations. See [Variations](./VARIATIONS.md) for how to use this. |
### This is an example of img2img:

!!! note

~~~~
dream> waterfall and rainbow -I./vacation-photo.png -W640 -H480 --fit
~~~~

    The width and height of the image must be multiples of 64. You can provide different
    values, but they will be rounded down to the nearest multiple of 64.

This will modify the indicated vacation photograph by making it more
like the prompt. Results will vary greatly depending on what is in the
image. We also ask to --fit the image into a box no bigger than
640x480. Otherwise the image size will be identical to the provided
photo and you may run out of memory if it is large.

#### img2img

In addition to the command-line options recognized by txt2img, img2img
accepts additional options:

!!! example

| Argument | Shortcut | Default | Description |
|--------------------|------------|---------------------|--------------|
| --init_img <path> | -I<path> | None | Path to the initialization image |
| --fit | -F | False | Scale the image to fit into the specified -H and -W dimensions |
| --strength <float> | -s<float> | 0.75 | How hard to try to match the prompt to the initial image. Ranges from 0.0-0.99, with higher values replacing the initial image completely. |

    ```bash
    dream> "waterfall and rainbow" -I./vacation-photo.png -W640 -H480 --fit
    ```
### This is an example of inpainting:

This will modify the indicated vacation photograph by making it more like the prompt. Results will
vary greatly depending on what is in the image. We also ask to --fit the image into a box no bigger
than 640x480. Otherwise the image size will be identical to the provided photo and you may run out
of memory if it is large.

~~~~
dream> waterfall and rainbow -I./vacation-photo.png -M./vacation-mask.png -W640 -H480 --fit
~~~~

Repeated chaining of img2img on an image can result in significant color shifts
in the output, especially if run with lower strength. Color correction can be
run against a reference image to fix this issue. Use the original input image to
the chain as the reference image for each step in the chain.
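One common way to implement this kind of reference-based color correction is histogram matching. The sketch below is illustrative only, assumes scikit-image (>= 0.19) and Pillow are available, and uses hypothetical file names; it is not the application's own code path:

```python
# Hedged sketch: match an img2img output's color distribution to the
# original chain input via histogram matching.
import numpy as np
from PIL import Image
from skimage.exposure import match_histograms

result = np.asarray(Image.open("chain-step-output.png").convert("RGB"))
reference = np.asarray(Image.open("original-input.png").convert("RGB"))

corrected = match_histograms(result, reference, channel_axis=-1)
Image.fromarray(corrected.astype(np.uint8)).save("chain-step-corrected.png")
```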
This will do the same thing as img2img, but image alterations will
only occur within transparent areas defined by the mask file specified
by -M. You may also supply just a single initial image with the areas
to overpaint made transparent, but you must be careful not to destroy
the pixels underneath when you create the transparent areas. See
[Inpainting](./INPAINTING.md) for details.

In addition to the command-line options recognized by txt2img, img2img
accepts additional options:
Inpainting accepts all the arguments used for txt2img and img2img, as
well as the --mask (-M) argument:

| Argument <img width="160" align="right"/> | Shortcut | Default | Description |
| ----------------------------------------- | ----------- | ------- | ----------- |
| `--init_img <path>` | `-I<path>` | `None` | Path to the initialization image |
| `--init_color <path>` | | `None` | Path to reference image for color correction |
| `--fit` | `-F` | `False` | Scale the image to fit into the specified -H and -W dimensions |
| `--strength <float>` | `-f<float>` | `0.75` | How hard to try to match the prompt to the initial image. Ranges from 0.0-0.99, with higher values replacing the initial image completely. |

| Argument | Shortcut | Default | Description |
|--------------------|------------|---------------------|--------------|
| --init_mask <path> | -M<path> | None | Path to an image the same size as the initial_image, with areas for inpainting made transparent. |

#### Inpainting

!!! example

    ```bash
    dream> "waterfall and rainbow" -I./vacation-photo.png -M./vacation-mask.png -W640 -H480 --fit
    ```

# Command-line editing and completion

If you are on a Macintosh or Linux machine, the command-line offers
convenient history tracking, editing, and command completion.

This will do the same thing as img2img, but image alterations will only occur within transparent
areas defined by the mask file specified by `-M`. You may also supply just a single initial image with
the areas to overpaint made transparent, but you must be careful not to destroy the pixels
underneath when you create the transparent areas. See [Inpainting](./INPAINTING.md) for details.

- To scroll through previous commands and potentially edit/reuse them, use the up and down cursor keys.
- To edit the current command, use the left and right cursor keys to position the cursor, and then backspace, delete or insert characters.
- To move to the very beginning of the command, type CTRL-A (or command-A on the Mac)
- To move to the end of the command, type CTRL-E.
- To cut a section of the command, position the cursor where you want to start cutting and type CTRL-K.
- To paste a cut section back in, position the cursor where you want to paste, and type CTRL-Y

Inpainting accepts all the arguments used for txt2img and img2img, as well as
the `--mask` (`-M`) argument:

Windows users can get similar, but more limited, functionality if they
launch dream.py with the "winpty" program:

| Argument <img width="100" align="right"/> | Shortcut | Default | Description |
| ----------------------------------------- | ---------- | ------- | ----------- |
| `--init_mask <path>` | `-M<path>` | `None` | Path to an image the same size as the initial_image, with areas for inpainting made transparent. |

~~~
> winpty python scripts\dream.py
~~~
## Command-line editing and completion

On the Mac and Linux platforms, when you exit dream.py, the last 1000
lines of your command-line history will be saved. When you restart
dream.py, you can access the saved history using the up-arrow key.

If you are on a Macintosh or Linux machine, the command-line offers convenient
history tracking, editing, and command completion.

In addition, limited command-line completion is installed. In various
contexts, you can start typing your command and press tab. A list of
potential completions will be presented to you. You can then type a
little more, hit tab again, and eventually autocomplete what you want.

- To scroll through previous commands and potentially edit/reuse them, use the
  ++up++ and ++down++ cursor keys.
- To edit the current command, use the ++left++ and ++right++ cursor keys to
  position the cursor, and then ++backspace++, ++delete++ or ++insert++
  characters.
- To move to the very beginning of the command, type ++ctrl+a++ (or
  ++command+a++ on the Mac)
- To move to the end of the command, type ++ctrl+e++.
- To cut a section of the command, position the cursor where you want to start
  cutting and type ++ctrl+k++.
- To paste a cut section back in, position the cursor where you want to paste,
  and type ++ctrl+y++

When specifying file paths using the one-letter shortcuts, the CLI
will attempt to complete pathnames for you. This is most handy for the
-I (init image) and -M (init mask) paths. To initiate completion, start
the path with a slash ("/") or "./". For example:

Windows users can get similar, but more limited, functionality if they launch
`dream.py` with the "winpty" program:

```batch
winpty python scripts\dream.py
```

On the Mac and Linux platforms, when you exit `dream.py`, the last 1000 lines of
your command-line history will be saved. When you restart `dream.py`, you can
access the saved history using the ++up++ key.

In addition, limited command-line completion is installed. In various contexts,
you can start typing your command and press tab. A list of potential completions
will be presented to you. You can then type a little more, hit tab again, and
eventually autocomplete what you want.

When specifying file paths using the one-letter shortcuts, the CLI will attempt
to complete pathnames for you. This is most handy for the `-I` (init image) and
`-M` (init mask) paths. To initiate completion, start the path with a slash `/`
or `./`, for example:

```bash
dream> "zebra with a mustache" -I./test-pictures<TAB>
dream> zebra with a mustache -I./test-pictures<TAB>
-I./test-pictures/Lincoln-and-Parrot.png -I./test-pictures/zebra.jpg -I./test-pictures/madonna.png
-I./test-pictures/bad-sketch.png -I./test-pictures/man_with_eagle/
```
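Completion of this kind is typically wired up with Python's standard readline module. A minimal, hypothetical sketch (not the project's actual completer):

```python
# Minimal, hypothetical sketch of readline-based path completion for
# -I/-M arguments; the real dream.py completer is more elaborate.
import glob
import readline

def complete_path(text: str, state: int):
    # Return the state-th filesystem match for the typed prefix.
    matches = glob.glob(text + "*")
    return matches[state] if state < len(matches) else None

readline.set_completer_delims(" \t\n")
readline.set_completer(complete_path)
readline.parse_and_bind("tab: complete")
```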
@@ -15,12 +15,37 @@ incomplete installations or crashes during the install process.

During `conda env create -f environment.yaml`, conda hangs indefinitely.

### **SOLUTION**

If it hangs because of the last PIP step (it usually gets stuck in the Git clone step), you can check the detailed log with this method:

```bash
export PIP_LOG="/tmp/pip_log.txt"
touch ${PIP_LOG}
tail -f ${PIP_LOG} &
conda env create -f environment-mac.yaml --debug --verbose
killall tail
rm ${PIP_LOG}
```

Enter the stable-diffusion directory and completely remove the `src` directory and all its contents.
The safest way to do this is to enter the stable-diffusion directory and give the command
`git clean -f`. If this still doesn't fix the problem, try `conda clean --all` and then restart at
the `conda env create` step.

**SOLUTION**

Conda sometimes gets stuck at the last PIP step, in which several git repositories are
cloned and built.

Enter the stable-diffusion directory and completely remove the `src`
directory and all its contents. The safest way to do this is to enter
the stable-diffusion directory and give the command `git clean -f`. If
this still doesn't fix the problem, try `conda clean --all` and then
restart at the `conda env create` step.

To further understand the problem, check the install log using this method:

```bash
export PIP_LOG="/tmp/pip_log.txt"
touch ${PIP_LOG}
tail -f ${PIP_LOG} &
conda env create -f environment-mac.yaml --debug --verbose
killall tail
rm ${PIP_LOG}
```

---
@@ -95,7 +95,8 @@ While that is downloading, open a Terminal and run the following commands:

```{.bash .annotate title="local repo setup"}
# clone the repo
git clone https://github.com/invoke-ai/InvokeAI.git
cd stable-diffusion
cd InvokeAI

# wait until the checkpoint file has downloaded, then proceed

@@ -124,7 +125,7 @@ ln -s "$PATH_TO_CKPT/sd-v1-4.ckpt" \

=== "Intel x86_64"

    ```bash
    PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-x86_64 \
    PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-64 \
        conda env create \
        -f environment-mac.yaml \
        && conda activate ldm
@@ -30,6 +30,7 @@ dependencies:
  - nomkl
  - numpy==1.23.2
  - omegaconf==2.1.1
  - openh264==2.3.0
  - onnx==1.12.0
  - onnxruntime==1.12.1
  - protobuf==3.20.1
frontend/dist/assets/index.66192cce.js (694 lines, vendored, new file)
File diff suppressed because one or more lines are too long
@@ -10,15 +10,13 @@ import PromptInput from '../features/options/PromptInput';
import LogViewer from '../features/system/LogViewer';
import Loading from '../Loading';
import { useAppDispatch } from './store';
import { requestAllImages, requestSystemConfig } from './socketio/actions';
import { requestSystemConfig } from './socketio/actions';

const App = () => {
  const dispatch = useAppDispatch();
  const [isReady, setIsReady] = useState<boolean>(false);

  // Load images from the gallery once
  useEffect(() => {
    dispatch(requestAllImages());
    dispatch(requestSystemConfig());
    setIsReady(true);
  }, [dispatch]);
frontend/src/app/invokeai.d.ts (7 lines changed, vendored)
@@ -107,6 +107,7 @@ export declare type Metadata = SystemConfig & {
export declare type Image = {
  uuid: string;
  url: string;
  mtime: number;
  metadata: Metadata;
};

@@ -148,6 +149,7 @@ export declare type SystemConfigResponse = SystemConfig;

export declare type ImageResultResponse = {
  url: string;
  mtime: number;
  metadata: Metadata;
};

@@ -157,7 +159,10 @@ export declare type ErrorResponse = {
};

export declare type GalleryImagesResponse = {
  images: Array<{ url: string; metadata: Metadata }>;
  images: Array<Omit<Image, 'uuid'>>;
  nextPage: number;
  offset: number;
  onlyNewImages: boolean;
};

export declare type ImageUrlAndUuidResponse = {
@@ -12,8 +12,11 @@ export const generateImage = createAction<undefined>('socketio/generateImage');
export const runESRGAN = createAction<InvokeAI.Image>('socketio/runESRGAN');
export const runGFPGAN = createAction<InvokeAI.Image>('socketio/runGFPGAN');
export const deleteImage = createAction<InvokeAI.Image>('socketio/deleteImage');
export const requestAllImages = createAction<undefined>(
  'socketio/requestAllImages'
export const requestImages = createAction<undefined>(
  'socketio/requestImages'
);
export const requestNewImages = createAction<undefined>(
  'socketio/requestNewImages'
);
export const cancelProcessing = createAction<undefined>(
  'socketio/cancelProcessing'
@@ -23,4 +26,6 @@ export const uploadInitialImage = createAction<File>(
);
export const uploadMaskImage = createAction<File>('socketio/uploadMaskImage');

export const requestSystemConfig = createAction<undefined>('socketio/requestSystemConfig');
export const requestSystemConfig = createAction<undefined>(
  'socketio/requestSystemConfig'
);
@@ -83,8 +83,17 @@ const makeSocketIOEmitters = (
      const { url, uuid } = imageToDelete;
      socketio.emit('deleteImage', url, uuid);
    },
    emitRequestAllImages: () => {
      socketio.emit('requestAllImages');
    emitRequestImages: () => {
      const { nextPage, offset } = getState().gallery;
      socketio.emit('requestImages', nextPage, offset);
    },
    emitRequestNewImages: () => {
      const { nextPage, offset, images } = getState().gallery;
      if (images.length > 0) {
        socketio.emit('requestImages', nextPage, offset, images[0].mtime);
      } else {
        socketio.emit('requestImages', nextPage, offset);
      }
    },
    emitCancelProcessing: () => {
      socketio.emit('cancel');
@@ -96,8 +105,8 @@ const makeSocketIOEmitters = (
      socketio.emit('uploadMaskImage', file, file.name);
    },
    emitRequestSystemConfig: () => {
      socketio.emit('requestSystemConfig')
    }
      socketio.emit('requestSystemConfig');
    },
  };
};
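The argument order these emitters use can also be exercised from a plain Socket.IO client. A hedged sketch using python-socketio (an assumption; any Socket.IO client works the same way):

```python
# Hedged sketch: drive the server's paginated gallery API from Python.
# Event names and argument order mirror emitRequestImages above.
import socketio

sio = socketio.Client()

@sio.on("galleryImages")
def on_gallery_images(data):
    print(f"got {len(data['images'])} images; next page is {data['nextPage']}")

sio.connect("http://localhost:9090")
sio.emit("requestImages", (1, 0))  # nextPage=1, offset=0
sio.sleep(2)
sio.disconnect()
```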
@@ -14,10 +14,10 @@ import {
} from '../../features/system/systemSlice';

import {
  addGalleryImages,
  addImage,
  clearIntermediateImage,
  removeImage,
  setGalleryImages,
  setIntermediateImage,
} from '../../features/gallery/gallerySlice';

@@ -25,6 +25,7 @@ import {
  setInitialImagePath,
  setMaskPath,
} from '../../features/options/optionsSlice';
import { requestNewImages } from './actions';

/**
 * Returns an object containing listener callbacks for socketio events.
@@ -43,6 +44,7 @@ const makeSocketIOListeners = (
      try {
        dispatch(setIsConnected(true));
        dispatch(setCurrentStatus('Connected'));
        dispatch(requestNewImages());
      } catch (e) {
        console.error(e);
      }
@@ -53,7 +55,6 @@ const makeSocketIOListeners = (
    onDisconnect: () => {
      try {
        dispatch(setIsConnected(false));
        dispatch(setIsProcessing(false));
        dispatch(setCurrentStatus('Disconnected'));

        dispatch(
@@ -72,13 +73,14 @@ const makeSocketIOListeners = (
     */
    onGenerationResult: (data: InvokeAI.ImageResultResponse) => {
      try {
        const { url, metadata } = data;
        const { url, mtime, metadata } = data;
        const newUuid = uuidv4();

        dispatch(
          addImage({
            uuid: newUuid,
            url,
            mtime,
            metadata: metadata,
          })
        );
@@ -99,11 +101,12 @@ const makeSocketIOListeners = (
    onIntermediateResult: (data: InvokeAI.ImageResultResponse) => {
      try {
        const uuid = uuidv4();
        const { url, metadata } = data;
        const { url, metadata, mtime } = data;
        dispatch(
          setIntermediateImage({
            uuid,
            url,
            mtime,
            metadata,
          })
        );
@@ -123,12 +126,13 @@ const makeSocketIOListeners = (
     */
    onESRGANResult: (data: InvokeAI.ImageResultResponse) => {
      try {
        const { url, metadata } = data;
        const { url, metadata, mtime } = data;

        dispatch(
          addImage({
            uuid: uuidv4(),
            url,
            mtime,
            metadata,
          })
        );
@@ -149,12 +153,13 @@ const makeSocketIOListeners = (
     */
    onGFPGANResult: (data: InvokeAI.ImageResultResponse) => {
      try {
        const { url, metadata } = data;
        const { url, metadata, mtime } = data;

        dispatch(
          addImage({
            uuid: uuidv4(),
            url,
            mtime,
            metadata,
          })
        );
@@ -209,16 +214,26 @@ const makeSocketIOListeners = (
     * Callback to run when we receive a 'galleryImages' event.
     */
    onGalleryImages: (data: InvokeAI.GalleryImagesResponse) => {
      const { images } = data;
      const { images, nextPage, offset } = data;

      /**
       * the logic here ideally would be in the reducer but we have a side effect:
       * generating a uuid. so the logic needs to be here, outside redux.
       */

      // Generate a UUID for each image
      const preparedImages = images.map((image): InvokeAI.Image => {
        const { url, metadata } = image;
        const { url, metadata, mtime } = image;
        return {
          uuid: uuidv4(),
          url,
          mtime,
          metadata,
        };
      });
      dispatch(setGalleryImages(preparedImages));

      dispatch(addGalleryImages({ images: preparedImages, nextPage, offset }));

      dispatch(
        addLogEntry({
          timestamp: dateFormat(new Date(), 'isoDateTime'),
@@ -24,7 +24,9 @@ import * as InvokeAI from '../invokeai';
export const socketioMiddleware = () => {
  const { hostname, port } = new URL(window.location.href);

  const socketio = io(`http://${hostname}:9090`);
  const socketio = io(`http://${hostname}:9090`, {
    timeout: 60000,
  });

  let areListenersSet = false;

@@ -51,7 +53,8 @@ export const socketioMiddleware = () => {
    emitRunESRGAN,
    emitRunGFPGAN,
    emitDeleteImage,
    emitRequestAllImages,
    emitRequestImages,
    emitRequestNewImages,
    emitCancelProcessing,
    emitUploadInitialImage,
    emitUploadMaskImage,
@@ -140,11 +143,17 @@ export const socketioMiddleware = () => {
        break;
      }

      case 'socketio/requestAllImages': {
        emitRequestAllImages();
      case 'socketio/requestImages': {
        emitRequestImages();
        break;
      }

      case 'socketio/requestNewImages': {
        emitRequestNewImages();
        break;
      }

      case 'socketio/cancelProcessing': {
        emitCancelProcessing();
        break;
@@ -1,5 +1,6 @@
import { Center, Flex, Text } from '@chakra-ui/react';
import { RootState } from '../../app/store';
import { Button, Center, Flex, Text } from '@chakra-ui/react';
import { requestImages } from '../../app/socketio/actions';
import { RootState, useAppDispatch } from '../../app/store';
import { useAppSelector } from '../../app/store';
import HoverableImage from './HoverableImage';

@@ -10,7 +11,7 @@ const ImageGallery = () => {
  const { images, currentImageUuid } = useAppSelector(
    (state: RootState) => state.gallery
  );

  const dispatch = useAppDispatch();
  /**
   * I don't like that this needs to rerender whenever the current image is changed.
   * What if we have a large number of images? I suppose pagination (planned) will
@@ -19,15 +20,22 @@ const ImageGallery = () => {
   * TODO: Refactor if performance complaints, or after migrating to new API which supports pagination.
   */

  const handleClickLoadMore = () => {
    dispatch(requestImages());
  };

  return images.length ? (
    <Flex gap={2} wrap="wrap" pb={2}>
      {[...images].reverse().map((image) => {
        const { uuid } = image;
        const isSelected = currentImageUuid === uuid;
        return (
          <HoverableImage key={uuid} image={image} isSelected={isSelected} />
        );
      })}
    <Flex direction={'column'} gap={2} pb={2}>
      <Flex gap={2} wrap="wrap">
        {images.map((image) => {
          const { uuid } = image;
          const isSelected = currentImageUuid === uuid;
          return (
            <HoverableImage key={uuid} image={image} isSelected={isSelected} />
          );
        })}
      </Flex>
      <Button onClick={handleClickLoadMore}>Load more...</Button>
    </Flex>
  ) : (
    <Center height={'100%'} position={'relative'}>
@@ -8,11 +8,15 @@ export interface GalleryState {
  currentImageUuid: string;
  images: Array<InvokeAI.Image>;
  intermediateImage?: InvokeAI.Image;
  nextPage: number;
  offset: number;
}

const initialState: GalleryState = {
  currentImageUuid: '',
  images: [],
  nextPage: 1,
  offset: 0,
};

export const gallerySlice = createSlice({
@@ -50,7 +54,7 @@ export const gallerySlice = createSlice({
       * Clamp the new index to ensure it is valid.
       */
      const newCurrentImageIndex = clamp(
        imageToDeleteIndex - 1,
        imageToDeleteIndex,
        0,
        newImages.length - 1
      );
@@ -67,10 +71,11 @@ export const gallerySlice = createSlice({
      state.images = newImages;
    },
    addImage: (state, action: PayloadAction<InvokeAI.Image>) => {
      state.images.push(action.payload);
      state.images.unshift(action.payload);
      state.currentImageUuid = action.payload.uuid;
      state.intermediateImage = undefined;
      state.currentImage = action.payload;
      state.offset += 1;
    },
    setIntermediateImage: (state, action: PayloadAction<InvokeAI.Image>) => {
      state.intermediateImage = action.payload;
@@ -78,13 +83,24 @@ export const gallerySlice = createSlice({
    clearIntermediateImage: (state) => {
      state.intermediateImage = undefined;
    },
    setGalleryImages: (state, action: PayloadAction<Array<InvokeAI.Image>>) => {
      const newImages = action.payload;
      if (newImages.length) {
        const newCurrentImage = newImages[newImages.length - 1];
        state.images = newImages;
    addGalleryImages: (
      state,
      action: PayloadAction<{
        images: Array<InvokeAI.Image>;
        nextPage: number;
        offset: number;
      }>
    ) => {
      const { images, nextPage, offset } = action.payload;
      if (images.length) {
        const newCurrentImage = images[0];
        state.images = state.images
          .concat(images)
          .sort((a, b) => b.mtime - a.mtime);
        state.currentImage = newCurrentImage;
        state.currentImageUuid = newCurrentImage.uuid;
        state.nextPage = nextPage;
        state.offset = offset;
      }
    },
  },
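In Python terms, the merge addGalleryImages performs looks roughly like this (illustrative analogue with a hypothetical helper; not the frontend code):

```python
# Rough Python analogue of addGalleryImages: append the fetched page and
# keep the gallery sorted newest-first by mtime.
def add_gallery_images(existing: list, incoming: list) -> list:
    merged = existing + incoming
    merged.sort(key=lambda img: img["mtime"], reverse=True)
    return merged

gallery = [{"url": "a.png", "mtime": 300}]
page = [{"url": "b.png", "mtime": 200}, {"url": "c.png", "mtime": 100}]
print([img["url"] for img in add_gallery_images(gallery, page)])
# ['a.png', 'b.png', 'c.png']
```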
@@ -95,7 +111,7 @@ export const {
  clearIntermediateImage,
  removeImage,
  setCurrentImage,
  setGalleryImages,
  addGalleryImages,
  setIntermediateImage,
} = gallerySlice.actions;
@@ -32,8 +32,16 @@ import { cloneElement, ReactElement } from 'react';
const systemSelector = createSelector(
  (state: RootState) => state.system,
  (system: SystemState) => {
    const { shouldDisplayInProgress, shouldConfirmOnDelete, shouldDisplayGuides } = system;
    return { shouldDisplayInProgress, shouldConfirmOnDelete, shouldDisplayGuides };
    const {
      shouldDisplayInProgress,
      shouldConfirmOnDelete,
      shouldDisplayGuides,
    } = system;
    return {
      shouldDisplayInProgress,
      shouldConfirmOnDelete,
      shouldDisplayGuides,
    };
  },
  {
    memoizeOptions: { resultEqualityCheck: isEqual },
@@ -64,8 +72,11 @@ const SettingsModal = ({ children }: SettingsModalProps) => {
    onClose: onRefreshModalClose,
  } = useDisclosure();

  const { shouldDisplayInProgress, shouldConfirmOnDelete, shouldDisplayGuides } =
    useAppSelector(systemSelector);
  const {
    shouldDisplayInProgress,
    shouldConfirmOnDelete,
    shouldDisplayGuides,
  } = useAppSelector(systemSelector);

  const dispatch = useAppDispatch();
@@ -1,7 +1,7 @@
import { createSlice } from '@reduxjs/toolkit';
import type { PayloadAction } from '@reduxjs/toolkit';
import { ExpandedIndex } from '@chakra-ui/react';
import * as InvokeAI from '../../app/invokeai'
import * as InvokeAI from '../../app/invokeai';

export type LogLevel = 'info' | 'warning' | 'error';

@@ -15,7 +15,9 @@ export interface Log {
  [index: number]: LogEntry;
}

export interface SystemState extends InvokeAI.SystemStatus, InvokeAI.SystemConfig {
export interface SystemState
  extends InvokeAI.SystemStatus,
    InvokeAI.SystemConfig {
  shouldDisplayInProgress: boolean;
  log: Array<LogEntry>;
  shouldShowLogViewer: boolean;
@@ -31,7 +33,6 @@ export interface SystemState extends InvokeAI.SystemStatus, InvokeAI.SystemConfi
  totalIterations: number;
  currentStatus: string;
  currentStatusHasSteps: boolean;

  shouldDisplayGuides: boolean;
}

@@ -51,7 +52,7 @@ const initialSystemState = {
  totalSteps: 0,
  currentIteration: 0,
  totalIterations: 0,
  currentStatus: '',
  currentStatus: 'Disconnected',
  currentStatusHasSteps: false,
  model: '',
  model_id: '',
@@ -107,6 +108,12 @@ export const systemSlice = createSlice({
    },
    setIsConnected: (state, action: PayloadAction<boolean>) => {
      state.isConnected = action.payload;
      state.isProcessing = false;
      state.currentStep = 0;
      state.totalSteps = 0;
      state.currentIteration = 0;
      state.totalIterations = 0;
      state.currentStatusHasSteps = false;
    },
    setSocketId: (state, action: PayloadAction<string>) => {
      state.socketId = action.payload;
@@ -339,6 +339,12 @@ class Args(object):
        action='store_true',
        help='Deprecated way to set --precision=float32',
    )
    model_group.add_argument(
        '--free_gpu_mem',
        dest='free_gpu_mem',
        action='store_true',
        help='Force free gpu memory before final decoding',
    )
    model_group.add_argument(
        '--precision',
        dest='precision',
@@ -588,7 +594,7 @@ class Args(object):
        '--upscale',
        nargs='+',
        type=float,
        help='Scale factor (2, 4) for upscaling final output followed by upscaling strength (0-1.0). If strength not specified, defaults to 0.75',
        help='Scale factor (1, 2, 3, 4, etc..) for upscaling final output followed by upscaling strength (0-1.0). If strength not specified, defaults to 0.75',
        default=None,
    )
    postprocessing_group.add_argument(
@@ -4,18 +4,42 @@ and generates with ldm.dream.generator.img2img
'''

import torch
import numpy as np
from PIL import Image
from ldm.dream.generator.base import Generator
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.dream.generator.img2img import Img2Img

import numpy as np
from tqdm import trange
from PIL import Image
from ldm.dream.generator.base import Generator
from ldm.dream.generator.img2img import Img2Img
from ldm.dream.devices import choose_autocast

class Embiggen(Generator):
    def __init__(self, model, precision):
        super().__init__(model, precision)
        self.init_latent = None

    # Replace generate because Embiggen doesn't need/use most of what it does normally
    def generate(self, prompt, iterations=1, seed=None,
                 image_callback=None, step_callback=None,
                 **kwargs):
        scope = choose_autocast(self.precision)
        make_image = self.get_make_image(
            prompt,
            step_callback = step_callback,
            **kwargs
        )
        results = []
        seed = seed if seed else self.new_seed()

        # Noise will be generated by the Img2Img generator when called
        with scope(self.model.device.type), self.model.ema_scope():
            for n in trange(iterations, desc='Generating'):
                # make_image will call Img2Img, which will do the equivalent of get_noise itself
                image = make_image()
                results.append([image, seed])
                if image_callback is not None:
                    image_callback(image, seed)
                seed = self.new_seed()
        return results

    @torch.no_grad()
    def get_make_image(
        self,
@@ -151,8 +175,19 @@ class Embiggen(Generator):
                # Clamp values to max 255
                if distanceToLR > 255:
                    distanceToLR = 255
                # Place the pixel as invert of distance
                agradientC.putpixel((x, y), int(255 - distanceToLR))
                agradientC.putpixel((x, y), round(255 - distanceToLR))

        # Create alternative asymmetric diagonal corner to use on "tailing" intersections to prevent hard edges
        # Fits for a left-fading gradient on the bottom side and full opacity on the right side.
        agradientAsymC = Image.new('L', (256, 256))
        for y in range(256):
            for x in range(256):
                value = round(max(0, x - (255 - y)) * (255 / max(1, y)))
                # Clamp values
                value = max(0, value)
                value = min(255, value)
                agradientAsymC.putpixel((x, y), value)

        # Create alpha layers default fully white
        alphaLayerL = Image.new("L", (width, height), 255)
@@ -163,8 +198,13 @@ class Embiggen(Generator):
        alphaLayerT.paste(agradientT, (0, 0))
        alphaLayerLTC.paste(agradientL, (0, 0))
        alphaLayerLTC.paste(agradientT, (0, 0))
        alphaLayerLTC.paste(agradientC.resize(
            (overlap_size_x, overlap_size_y)), (0, 0))
        alphaLayerLTC.paste(agradientC.resize((overlap_size_x, overlap_size_y)), (0, 0))
        # Make masks with an asymmetric upper-right corner so that when the curved transparent corner of the next tile
        # to its right is placed, it doesn't reveal a hard trailing semi-transparent edge in the overlapping space
        alphaLayerTaC = alphaLayerT.copy()
        alphaLayerTaC.paste(agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0))
        alphaLayerLTaC = alphaLayerLTC.copy()
        alphaLayerLTaC.paste(agradientAsymC.rotate(270).resize((overlap_size_x, overlap_size_y)), (width - overlap_size_x, 0))

        if embiggen_tiles:
            # Individual unconnected sides
@@ -242,7 +282,7 @@ class Embiggen(Generator):
        del agradientT
        del agradientC

        def make_image(x_T):
        def make_image():
            # Make main tiles -------------------------------------------------
            if embiggen_tiles:
                print(f'>> Making {len(embiggen_tiles)} Embiggen tiles...')
@@ -251,7 +291,20 @@ class Embiggen(Generator):
                    f'>> Making {(emb_tiles_x * emb_tiles_y)} Embiggen tiles ({emb_tiles_x}x{emb_tiles_y})...')

            emb_tile_store = []
            # Although we could use the same seed for every tile for determinism, at higher strengths this may
            # produce duplicated structures for each tile and make the tiling effect more obvious.
            # Instead, track and iterate a local seed we pass to Img2Img.
            seed = self.seed
            seedintlimit = np.iinfo(np.uint32).max - 1  # only retrieve this one from numpy

            for tile in range(emb_tiles_x * emb_tiles_y):
                # Don't iterate on first tile
                if tile != 0:
                    if seed < seedintlimit:
                        seed += 1
                    else:
                        seed = 0

                # Determine if this is a re-run and replace
                if embiggen_tiles and tile not in embiggen_tiles:
                    continue
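The per-tile seed walk above can be isolated as a small illustrative sketch (not the generator's code):

```python
# Sketch of the local seed iteration: each tile after the first bumps the
# seed by one and wraps before exceeding the uint32 range numpy reports.
import numpy as np

seed = 42
seed_limit = np.iinfo(np.uint32).max - 1
for tile in range(4):
    if tile != 0:
        seed = seed + 1 if seed < seed_limit else 0
    print(f"tile {tile}: seed {seed}")
```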
@@ -294,21 +347,20 @@ class Embiggen(Generator):

                tile_results = gen_img2img.generate(
                    prompt,
                    iterations=1,
                    seed=self.seed,
                    sampler=sampler,
                    steps=steps,
                    cfg_scale=cfg_scale,
                    conditioning=conditioning,
                    ddim_eta=ddim_eta,
                    image_callback=None,  # called only after the final image is generated
                    step_callback=step_callback,  # called after each intermediate image is generated
                    width=width,
                    height=height,
                    init_img=init_img,  # img2img doesn't need this, but it might in the future
                    init_image=newinitimage,  # notice that init_image is different from init_img
                    mask_image=None,
                    strength=strength,
                    iterations = 1,
                    seed = seed,
                    sampler = sampler,
                    steps = steps,
                    cfg_scale = cfg_scale,
                    conditioning = conditioning,
                    ddim_eta = ddim_eta,
                    image_callback = None,  # called only after the final image is generated
                    step_callback = step_callback,  # called after each intermediate image is generated
                    width = width,
                    height = height,
                    init_image = newinitimage,  # notice that init_image is different from init_img
                    mask_image = None,
                    strength = strength,
                )

                emb_tile_store.append(tile_results[0][0])
@@ -381,24 +433,24 @@ class Embiggen(Generator):
                    # bottom of image
                    elif emb_row_i == emb_tiles_y - 1:
                        if emb_column_i == 0:
                            if (tile+1) in embiggen_tiles:  # Look-ahead right
                                intileimage.putalpha(alphaLayerT)
                            if (tile+1) in embiggen_tiles:  # Look-ahead right
                                intileimage.putalpha(alphaLayerTaC)
                            else:
                                intileimage.putalpha(alphaLayerRTC)
                        elif emb_column_i == emb_tiles_x - 1:
                            # No tiles to look ahead to
                            intileimage.putalpha(alphaLayerLTC)
                        else:
                            if (tile+1) in embiggen_tiles:  # Look-ahead right
                                intileimage.putalpha(alphaLayerLTC)
                            if (tile+1) in embiggen_tiles:  # Look-ahead right
                                intileimage.putalpha(alphaLayerLTaC)
                            else:
                                intileimage.putalpha(alphaLayerABB)
                    # vertical middle of image
                    else:
                        if emb_column_i == 0:
                            if (tile+1) in embiggen_tiles:  # Look-ahead right
                                if (tile+emb_tiles_x) in embiggen_tiles:  # Look-ahead down
                                    intileimage.putalpha(alphaLayerT)
                            if (tile+1) in embiggen_tiles:  # Look-ahead right
                                if (tile+emb_tiles_x) in embiggen_tiles:  # Look-ahead down
                                    intileimage.putalpha(alphaLayerTaC)
                                else:
                                    intileimage.putalpha(alphaLayerTB)
                            elif (tile+emb_tiles_x) in embiggen_tiles:  # Look-ahead down only
@@ -411,9 +463,9 @@ class Embiggen(Generator):
                            else:
                                intileimage.putalpha(alphaLayerABR)
                        else:
                            if (tile+1) in embiggen_tiles:  # Look-ahead right
                                if (tile+emb_tiles_x) in embiggen_tiles:  # Look-ahead down
                                    intileimage.putalpha(alphaLayerLTC)
                            if (tile+1) in embiggen_tiles:  # Look-ahead right
                                if (tile+emb_tiles_x) in embiggen_tiles:  # Look-ahead down
                                    intileimage.putalpha(alphaLayerLTaC)
                                else:
                                    intileimage.putalpha(alphaLayerABR)
                            elif (tile+emb_tiles_x) in embiggen_tiles:  # Look-ahead down only
@@ -425,9 +477,15 @@ class Embiggen(Generator):
                    if emb_row_i == 0 and emb_column_i >= 1:
                        intileimage.putalpha(alphaLayerL)
                    elif emb_row_i >= 1 and emb_column_i == 0:
                        intileimage.putalpha(alphaLayerT)
                        if emb_column_i + 1 == emb_tiles_x:  # If we don't have anything that can be placed to the right
                            intileimage.putalpha(alphaLayerT)
                        else:
                            intileimage.putalpha(alphaLayerTaC)
                    else:
                        intileimage.putalpha(alphaLayerLTC)
                        if emb_column_i + 1 == emb_tiles_x:  # If we don't have anything that can be placed to the right
                            intileimage.putalpha(alphaLayerLTC)
                        else:
                            intileimage.putalpha(alphaLayerLTaC)
                # Layer tile onto final image
                outputsuperimage.alpha_composite(intileimage, (left, top))
            else:
@@ -28,6 +28,10 @@ class Txt2Img(Generator):
                height // self.downsampling_factor,
                width // self.downsampling_factor,
            ]

            if self.free_gpu_mem and self.model.model.device != self.model.device:
                self.model.model.to(self.model.device)

            samples, _ = sampler.sample(
                batch_size = 1,
                S = steps,
@@ -41,6 +45,10 @@ class Txt2Img(Generator):
                img_callback = step_callback,
                threshold = threshold,
            )

            if self.free_gpu_mem:
                self.model.model.to("cpu")

            return self.sample_to_image(samples)

        return make_image
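The offload idea above generalizes; a hedged, self-contained sketch of the same pattern (assuming a torch module and a CUDA device; not the generator's exact code):

```python
# Keep weights on the CPU, move them to the GPU only for the sampling
# call, then move back and release cached memory before decoding.
import torch

def run_with_offload(model: torch.nn.Module, sample_fn, *args, **kwargs):
    model.to("cuda")
    try:
        return sample_fn(*args, **kwargs)
    finally:
        model.to("cpu")           # free GPU memory before final decoding
        torch.cuda.empty_cache()  # return cached blocks to the driver
```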
@@ -14,73 +14,53 @@ class ESRGAN():
        else:
            use_half_precision = True

    def load_esrgan_bg_upsampler(self, upsampler_scale):
    def load_esrgan_bg_upsampler(self):
        if not torch.cuda.is_available():  # CPU or MPS on M1
            use_half_precision = False
        else:
            use_half_precision = True

        model_path = {
            2: 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth',
            4: 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth',
        }
        from realesrgan.archs.srvgg_arch import SRVGGNetCompact
        from realesrgan import RealESRGANer

        if upsampler_scale not in model_path:
            return None
        else:
            from basicsr.archs.rrdbnet_arch import RRDBNet
            from realesrgan import RealESRGANer
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
        model_path = 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth'
        scale = 4

        if upsampler_scale == 4:
            model = RRDBNet(
                num_in_ch=3,
                num_out_ch=3,
                num_feat=64,
                num_block=23,
                num_grow_ch=32,
                scale=4,
            )
        if upsampler_scale == 2:
            model = RRDBNet(
                num_in_ch=3,
                num_out_ch=3,
                num_feat=64,
                num_block=23,
                num_grow_ch=32,
                scale=2,
            )

        bg_upsampler = RealESRGANer(
            scale=upsampler_scale,
            model_path=model_path[upsampler_scale],
            model=model,
            tile=self.bg_tile_size,
            tile_pad=10,
            pre_pad=0,
            half=use_half_precision,
        )
        bg_upsampler = RealESRGANer(
            scale=scale,
            model_path=model_path,
            model=model,
            tile=self.bg_tile_size,
            tile_pad=10,
            pre_pad=0,
            half=use_half_precision,
        )

        return bg_upsampler

    def process(self, image, strength: float, seed: str = None, upsampler_scale: int = 2):
        if seed is not None:
            print(
                f'>> Real-ESRGAN Upscaling seed:{seed} : scale:{upsampler_scale}x'
            )

        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=DeprecationWarning)
            warnings.filterwarnings('ignore', category=UserWarning)

            try:
                upsampler = self.load_esrgan_bg_upsampler(upsampler_scale)
                upsampler = self.load_esrgan_bg_upsampler()
            except Exception:
                import traceback
                import sys

                print('>> Error loading Real-ESRGAN:', file=sys.stderr)
                print(traceback.format_exc(), file=sys.stderr)

        if upsampler_scale == 0:
            print('>> Real-ESRGAN: Invalid scaling option. Image not upscaled.')
            return image

        if seed is not None:
            print(
                f'>> Real-ESRGAN Upscaling seed:{seed} : scale:{upsampler_scale}x'
            )

        output, _ = upsampler.enhance(
            np.array(image, dtype=np.uint8),
            outscale=upsampler_scale,
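A hedged usage sketch of the refactored upscaler (it assumes the class constructs with defaults and that a Pillow image is at hand; the file name is hypothetical):

```python
# After the refactor, one general-purpose model (realesr-general-x4v3) is
# always loaded, and upsampler_scale only controls the output scale.
from PIL import Image

esrgan = ESRGAN()  # assumption: default construction suffices here
image = Image.open("outputs/img_samples/000001.png").convert("RGB")
upscaled = esrgan.process(image, strength=0.75, seed="123456789", upsampler_scale=2)
```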
@@ -507,11 +507,8 @@ class Generate:
        prompt = None
        try:
            args = metadata_from_png(image_path)
            if len(args) > 1:
                print("* Can't postprocess a grid")
                return
            seed = args[0].seed
            prompt = args[0].prompt
            seed = args.seed
            prompt = args.prompt
            print(f'>> retrieved seed {seed} and prompt "{prompt}" from {image_path}')
        except:
            m = re.search('(\d+)\.png$', image_path)
@@ -665,6 +662,7 @@ class Generate:
        if not self.generators.get('txt2img'):
            from ldm.dream.generator.txt2img import Txt2Img
            self.generators['txt2img'] = Txt2Img(self.model, self.precision)
            self.generators['txt2img'].free_gpu_mem = self.free_gpu_mem
        return self.generators['txt2img']

    def _make_inpaint(self):
@@ -733,14 +731,6 @@ class Generate:
        for r in image_list:
            image, seed = r
            try:
                if upscale is not None:
                    if self.esrgan is not None:
                        if len(upscale) < 2:
                            upscale.append(0.75)
                        image = self.esrgan.process(
                            image, upscale[1], seed, int(upscale[0]))
                    else:
                        print(">> ESRGAN is disabled. Image not upscaled.")
                if strength > 0:
                    if self.gfpgan is not None or self.codeformer is not None:
                        if facetool == 'gfpgan':
@@ -756,6 +746,14 @@ class Generate:
                        image = self.codeformer.process(image=image, strength=strength, device=cf_device, seed=seed, fidelity=codeformer_fidelity)
                    else:
                        print(">> Face Restoration is disabled.")
                if upscale is not None:
                    if self.esrgan is not None:
                        if len(upscale) < 2:
                            upscale.append(0.75)
                        image = self.esrgan.process(
                            image, upscale[1], seed, int(upscale[0]))
                    else:
                        print(">> ESRGAN is disabled. Image not upscaled.")
            except Exception as e:
                print(
                    f'>> Error running RealESRGAN or GFPGAN. Your image was not upscaled.\n{e}'
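To summarize the reordering in this hunk: face restoration now runs before upscaling. Schematically (restore_faces and upsample are hypothetical stand-ins for the GFPGAN/CodeFormer and Real-ESRGAN calls):

```python
# Schematic only: post-processing order after this change.
def postprocess(image, strength, upscale, restore_faces, upsample):
    if strength > 0:
        image = restore_faces(image, strength)  # GFPGAN / CodeFormer first
    if upscale is not None:
        if len(upscale) < 2:
            upscale.append(0.75)  # default upscaling strength
        image = upsample(image, upscale[1], int(upscale[0]))  # then Real-ESRGAN
    return image
```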
@@ -13,7 +13,7 @@
   "source": [
    "Note that you will need NVIDIA drivers, Python 3.10, and Git installed\n",
    "beforehand - simplified\n",
    "[step-by-step instructions](https://github.com/lstein/stable-diffusion/wiki/Easy-peasy-Windows-install)\n",
    "[step-by-step instructions](https://github.com/invoke-ai/InvokeAI/wiki/Easy-peasy-Windows-install)\n",
    "are available in the wiki (you'll only need steps 1, 2, & 3)"
   ]
  },
@@ -40,8 +40,8 @@
   "outputs": [],
   "source": [
    "%%cmd\n",
    "git clone https://github.com/lstein/stable-diffusion.git\n",
    "cd /content/stable-diffusion/\n",
    "git clone https://github.com/invoke-ai/InvokeAI.git\n",
    "cd /content/InvokeAI/\n",
    "git checkout --quiet development"
   ]
  },
@@ -52,14 +52,14 @@
   "outputs": [],
   "source": [
    "%%cmd\n",
    "pew new --python 3.10 -r requirements-lin-win-colab-CUDA.txt --dont-activate stable-diffusion"
    "pew new --python 3.10 -r requirements-lin-win-colab-CUDA.txt --dont-activate invoke-ai"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Switch the notebook kernel to the new 'stable-diffusion' environment!\n",
    "# Switch the notebook kernel to the new 'invoke-ai' environment!\n",
    "\n",
    "## VSCode: restart VSCode and come back to this cell\n",
    "\n",
@@ -67,7 +67,7 @@
    "1. Type \"Select Interpreter\" and select \"Jupyter: Select Interpreter to Start Jupyter Server\"\n",
    "1. VSCode will say that it needs to install packages. Click the \"Install\" button.\n",
    "1. Once the install is finished, do 1 & 2 again\n",
    "1. Pick 'stable-diffusion'\n",
    "1. Pick 'invoke-ai'\n",
    "1. Run the following cell"
   ]
  },
@@ -77,7 +77,7 @@
   "metadata": {},
   "outputs": [],
   "source": [
    "%cd stable-diffusion"
    "%cd InvokeAI"
   ]
  },
  {
@@ -88,7 +88,7 @@
   "## Jupyter/JupyterLab\n",
   "\n",
   "1. Run the cell below\n",
   "1. Click on the toolbar where it says \"(ipykernel)\" ↗️. You should get a pop-up asking you to \"Select Kernel\". Pick 'stable-diffusion' from the drop-down.\n"
   "1. Click on the toolbar where it says \"(ipykernel)\" ↗️. You should get a pop-up asking you to \"Select Kernel\". Pick 'invoke-ai' from the drop-down.\n"
   ]
  },
  {
@@ -106,9 +106,9 @@
   "source": [
    "# DO NOT RUN THIS CELL IF YOU ARE USING VSCODE!!\n",
    "%%cmd\n",
    "pew workon stable-diffusion\n",
    "pew workon invoke-ai\n",
    "pip3 install ipykernel\n",
    "python -m ipykernel install --name=stable-diffusion"
    "python -m ipykernel install --name=invoke-ai"
   ]
  },
  {
@@ -182,15 +182,20 @@
   "\n",
   "Now:\n",
   "\n",
   "1. `cd` to wherever the 'stable-diffusion' directory is\n",
   "1. Run `pew workon stable-diffusion`\n",
   "1. `cd` to wherever the 'InvokeAI' directory is\n",
   "1. Run `pew workon invoke-ai`\n",
   "1. Run `winpty python scripts\\dream.py`"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.10.6 ('ldm')",
   "display_name": "Python 3.10.6 64-bit",
   "language": "python",
   "name": "python3"
  },
@@ -208,7 +213,7 @@
  },
  "vscode": {
   "interpreter": {
    "hash": "a05e4574567b7bc2c98f7f9aa579f9ea5b8739b54844ab610ac85881c4be2659"
    "hash": "5e164cef426134bf171f386fbddecb52046b6c1479f922ab8dfdd30df05e0e80"
   }
  }
 },
@@ -108,6 +108,8 @@ def main():

    # preload the model
    gen.load_model()

    # set additional option
    gen.free_gpu_mem = opt.free_gpu_mem

    if not infile:
        print(
@@ -433,7 +435,7 @@ def dream_server_loop(gen, host, port, outdir, gfpgan):
            f"Point your browser at http://localhost:{port} or use the host's DNS name or IP address.")
    else:
        print(">> Default host address now 127.0.0.1 (localhost). Use --host 0.0.0.0 to bind any address.")
        print(f">> Point your browser at http://{host}:{port}.")
        print(f">> Point your browser at http://{host}:{port}")

    try:
        dream_server.serve_forever()
|
||||
if gfpgan:
|
||||
print('Loading models from RealESRGAN and facexlib')
|
||||
try:
|
||||
from basicsr.archs.rrdbnet_arch import RRDBNet
|
||||
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
|
||||
from facexlib.utils.face_restoration_helper import FaceRestoreHelper
|
||||
|
||||
RealESRGANer(
|
||||
scale=2,
|
||||
model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth',
|
||||
model=RRDBNet(
|
||||
num_in_ch=3,
|
||||
num_out_ch=3,
|
||||
num_feat=64,
|
||||
num_block=23,
|
||||
num_grow_ch=32,
|
||||
scale=2,
|
||||
),
|
||||
)
|
||||
|
||||
RealESRGANer(
|
||||
scale=4,
|
||||
model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth',
|
||||
model=RRDBNet(
|
||||
num_in_ch=3,
|
||||
num_out_ch=3,
|
||||
num_feat=64,
|
||||
num_block=23,
|
||||
num_grow_ch=32,
|
||||
scale=4,
|
||||
),
|
||||
model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth',
|
||||
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
|
||||
)
|
||||
|
||||
FaceRestoreHelper(1, det_model='retinaface_resnet50')
|
||||
|