mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00

resolved multiple conflicts between PR #683 and subsequent PRs

commit f408ef2e6c
@@ -2,17 +2,16 @@
title: Upscale
---

## Intro

# :material-image-size-select-large: Upscale

The script provides the ability to restore faces and upscale. You can apply these operations
at the time you generate the images, or at any time to a previously-generated PNG file, using
the [!fix](#fixing-previously-generated-images) command.

## **Intro**

## Face Fixing

The script provides the ability to restore faces and upscale.

You can enable these features by passing `--restore` and `--esrgan` to your launch script; these
load the face restoration and upscaling modules respectively.
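
A hedged example invocation (assuming the `dream.py` script mentioned later in this document is the launch script, and that it lives under `scripts/`):

`python scripts/dream.py --restore --esrgan`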

The default face restoration module is GFPGAN and the default upscaling module is ESRGAN.
The default face restoration module is GFPGAN. The default upscale is Real-ESRGAN. For an alternative
face restoration module, see [CodeFormer Support] below.

As of version 1.14, environment.yaml will install the Real-ESRGAN package into the standard install
location for python packages, and will put GFPGAN into a subdirectory of "src" in the
@@ -142,8 +141,6 @@ that is the best restoration possible. This may deviate slightly from the origin
excellent option to use in situations when there is very little facial data to work with.

`<prompt> -G 1.0 -ft codeformer -cf 0.1`
<<<<<<< HEAD
=======

## Fixing Previously-Generated Images

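A hedged sketch of the kind of invocation this section describes (the file name is hypothetical, `-G` is the face-restoration strength used earlier in this document, and the `-U` upscaling switch is an assumption):

`!fix ./outputs/img-samples/000044.2945021133.png -G 0.8 -U 2`
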
@@ -162,5 +159,3 @@ the `!fix` command does not replace the original file, unlike the behavior at ge

If, for some reason, you do not wish to load the GFPGAN and/or ESRGAN libraries, you can disable them
on the dream.py command line with the `--no_restore` and `--no_upscale` options, respectively.
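
A hedged example of disabling both modules on the command line (again assuming `dream.py` is run as a script; the exact path may vary by install):

`python scripts/dream.py --no_restore --no_upscale`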

>>>>>>> Update UPSCALE.md

@@ -27,11 +27,29 @@ from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.plms import PLMSSampler
from ldm.models.diffusion.ksampler import KSampler
from ldm.dream.pngwriter import PngWriter, retrieve_metadata
from ldm.dream.args import metadata_loads
from ldm.dream.image_util import InitImageResizer
from ldm.dream.devices import choose_torch_device, choose_precision
from ldm.dream.conditioning import get_uc_and_c
from ldm.dream.pngwriter import PngWriter, retrieve_metadata
from ldm.dream.args import metadata_loads
from ldm.dream.image_util import InitImageResizer
from ldm.dream.devices import choose_torch_device
from ldm.dream.conditioning import get_uc_and_c

def fix_func(orig):
    if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
        def new_func(*args, **kw):
            device = kw.get("device", "mps")
            kw["device"]="cpu"
            return orig(*args, **kw).to(device)
        return new_func
    return orig

torch.rand = fix_func(torch.rand)
torch.rand_like = fix_func(torch.rand_like)
torch.randn = fix_func(torch.randn)
torch.randn_like = fix_func(torch.randn_like)
torch.randint = fix_func(torch.randint)
torch.randint_like = fix_func(torch.randint_like)
torch.bernoulli = fix_func(torch.bernoulli)
torch.multinomial = fix_func(torch.multinomial)

def fix_func(orig):
    if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
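
The `fix_func` wrapper in the hunk above is the MPS random-number workaround being merged: when PyTorch's MPS backend is available, each patched RNG entry point samples on the CPU and then moves the result to the device the caller asked for. A minimal sketch of the observable effect (assuming the patched module has been imported and an Apple-silicon MPS device is present):

    import torch

    # With the monkey-patch active, sampling happens on the CPU under the hood,
    # but the returned tensor lives on the MPS device that was requested.
    noise = torch.randn(4, 64, 64, device="mps")
    print(noise.device)  # mps:0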
@@ -76,7 +76,7 @@ def main():
                gfpgan=gfpgan,
                codeformer=codeformer,
                esrgan=esrgan
            )
        )
    except (FileNotFoundError, IOError, KeyError) as e:
        print(f'{e}. Aborting.')
        sys.exit(-1)
@@ -414,20 +414,5 @@ def dream_server_loop(gen, host, port, outdir, gfpgan):

    dream_server.server_close()


<<<<<<< HEAD
=======
def write_log_message(results, log_path):
    """logs the name of the output image, prompt, and prompt args to the terminal and log file"""
    global output_cntr
    log_lines = [f'{path}: {prompt}\n' for path, prompt in results]
    for l in log_lines:
        output_cntr += 1
        print(f'[{output_cntr}] {l}', end='')

    with open(log_path, 'a', encoding='utf-8') as file:
        file.writelines(log_lines)

>>>>>>> GFPGAN and Real ESRGAN Implementation Refactor
if __name__ == '__main__':
    main()
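
The `write_log_message` helper in the hunk above takes `(path, prompt)` pairs, prints each as a numbered line on the terminal, and appends the same lines to a log file. A hedged usage sketch from within the same module (file names, prompt text, and the log path are hypothetical; `output_cntr` is assumed to start at 0 at module level, as the `global` statement implies):

    results = [
        ('outputs/img-samples/000001.123456789.png', '"a fantasy landscape" -s50 -W512 -H512'),
        ('outputs/img-samples/000002.987654321.png', '"a fantasy landscape" -s50 -W512 -H512'),
    ]
    write_log_message(results, 'outputs/img-samples/dream_log.txt')
    # prints:
    # [1] outputs/img-samples/000001.123456789.png: "a fantasy landscape" -s50 -W512 -H512
    # [2] outputs/img-samples/000002.987654321.png: "a fantasy landscape" -s50 -W512 -H512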