Merge branch 'main' into bugfix/convert-v2-models

commit 717d53a773
Author: Lincoln Stein
Date:   2023-02-11 14:27:52 -05:00 (committed by GitHub)
3 changed files with 19 additions and 15 deletions

@@ -125,7 +125,7 @@ manager, please follow these steps:
 === "Windows"
     ```ps
-    .venv\script\activate
+    .venv\Scripts\activate
     ```
 If you get a permissions error at this point, run this command and try again
@@ -295,13 +295,12 @@ on your system, please see the [Git Installation
 Guide](https://github.com/git-guides/install-git)
 1. From the command line, run this command:
    ```bash
    git clone https://github.com/invoke-ai/InvokeAI.git
    ```
    This will create a directory named `InvokeAI` and populate it with the
    full source code from the InvokeAI repository.
 2. Activate the InvokeAI virtual environment as per step (4) of the manual
    installation protocol (important!)
@@ -342,7 +341,7 @@ installation protocol (important!)
 repository. You can then use GitHub functions to create and submit
 pull requests to contribute improvements to the project.
-Please see [Contributing](/index.md#Contributing) for hints
+Please see [Contributing](../index.md#contributing) for hints
 on getting started.
 ### Unsupported Conda Install

@@ -321,6 +321,7 @@ class Generate:
         codeformer_fidelity = None,
         save_original = False,
         upscale = None,
+        upscale_denoise_str = 0.75,
         # this is specific to inpainting and causes more extreme inpainting
         inpaint_replace = 0.0,
         # This controls the size at which inpaint occurs (scaled up for inpaint, then back down for the result)
@@ -560,6 +561,7 @@ class Generate:
         if upscale is not None or facetool_strength > 0:
             self.upscale_and_reconstruct(results,
                 upscale = upscale,
+                upscale_denoise_str = upscale_denoise_str,
                 facetool = facetool,
                 strength = facetool_strength,
                 codeformer_fidelity = codeformer_fidelity,
@@ -633,6 +635,7 @@ class Generate:
         facetool_strength = 0.0,
         codeformer_fidelity = 0.75,
         upscale = None,
+        upscale_denoise_str = 0.75,
         out_direction = None,
         outcrop = [],
         save_original = True, # to get new name
@@ -684,6 +687,7 @@ class Generate:
             codeformer_fidelity = codeformer_fidelity,
             save_original = save_original,
             upscale = upscale,
+            upscale_denoise_str = upscale_denoise_str,
             image_callback = callback,
             prefix = prefix,
         )
@@ -952,6 +956,7 @@ class Generate:
         image_list,
         facetool = 'gfpgan',
         upscale = None,
+        upscale_denoise_str = 0.75,
         strength = 0.0,
         codeformer_fidelity = 0.75,
         save_original = False,
@@ -982,7 +987,7 @@ class Generate:
                 if len(upscale) < 2:
                     upscale.append(0.75)
                 image = self.esrgan.process(
-                    image, upscale[1], seed, int(upscale[0]))
+                    image, upscale[1], seed, int(upscale[0]), denoise_str=upscale_denoise_str)
             else:
                 print(">> ESRGAN is disabled. Image not upscaled.")
         except Exception as e:
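Taken together, the hunks above thread a single `upscale_denoise_str` value from the `Generate` entry points down to the ESRGAN call. The following is a minimal, self-contained sketch of that plumbing; `FakeESRGAN` and `Upscaler` are hypothetical stand-ins, not InvokeAI classes, and only the parameter flow mirrors the diff.

```python
# Sketch of the upscale_denoise_str plumbing; FakeESRGAN/Upscaler are
# hypothetical stand-ins, only the parameter flow mirrors the diff above.

class FakeESRGAN:
    def process(self, image, strength, seed, scale, denoise_str=0.75):
        # The real wrapper would run Real-ESRGAN; here we just report arguments.
        print(f">> upscale x{scale}, strength={strength}, denoise_str={denoise_str}")
        return image

class Upscaler:
    def __init__(self):
        self.esrgan = FakeESRGAN()

    def upscale_and_reconstruct(self, image, seed, upscale, upscale_denoise_str=0.75):
        # upscale is [scale, strength]; default the strength to 0.75 when the
        # caller supplies only the scale factor, as the original code does.
        if len(upscale) < 2:
            upscale.append(0.75)
        return self.esrgan.process(
            image, upscale[1], seed, int(upscale[0]),
            denoise_str=upscale_denoise_str,
        )

if __name__ == "__main__":
    Upscaler().upscale_and_reconstruct(image=None, seed=42, upscale=[2], upscale_denoise_str=0.6)
```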

@@ -58,12 +58,9 @@ def main():
     print(f'>> Internet connectivity is {Globals.internet_available}')
     if not args.conf:
-        if not os.path.exists(os.path.join(Globals.root,'configs','models.yaml')):
-            report_model_error(opt, e)
-            # print(f"\n** Error. The file {os.path.join(Globals.root,'configs','models.yaml')} could not be found.")
-            # print('** Please check the location of your invokeai directory and use the --root_dir option to point to the correct path.')
-            # print('** This script will now exit.')
-            # sys.exit(-1)
+        config_file = os.path.join(Globals.root,'configs','models.yaml')
+        if not os.path.exists(config_file):
+            report_model_error(opt, FileNotFoundError(f"The file {config_file} could not be found."))
     print(f'>> {ldm.invoke.__app_name__}, version {ldm.invoke.__version__}')
     print(f'>> InvokeAI runtime directory is "{Globals.root}"')
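The old branch passed an undefined `e` to `report_model_error`; the new code builds the config path once and reports a concrete `FileNotFoundError`. Below is a minimal, self-contained sketch of that check; `report_model_error`, `check_models_config`, and the root path are hypothetical stand-ins for the real InvokeAI helpers and `Globals`.

```python
# Sketch of the reworked models.yaml check above; report_model_error and
# root are stand-ins for InvokeAI's helpers/Globals, only the control flow
# mirrors the diff.
import os

def report_model_error(opt, err):
    print(f"** model error: {err}")

def check_models_config(root, opt=None, conf=None):
    if not conf:
        config_file = os.path.join(root, 'configs', 'models.yaml')
        if not os.path.exists(config_file):
            report_model_error(opt, FileNotFoundError(f"The file {config_file} could not be found."))

check_models_config('/nonexistent/invokeai')  # reports the missing models.yaml
```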
@@ -659,7 +656,6 @@ def import_ckpt_model(path_or_url: Union[Path, str], gen, opt, completer) -> Opt
     )
     if not (config_file := _ask_for_config_file(path_or_url, completer)):
         return
     completer.complete_extensions(('.ckpt','.safetensors'))
     vae = None
     default = Path(Globals.root,'models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt')
@@ -742,7 +738,10 @@ def optimize_model(model_name_or_path:str, gen, opt, completer):
     ckpt_path = None
     original_config_file = None
-    if (model_info := manager.model_info(model_name_or_path)):
+    if model_name_or_path == gen.model_name:
+        print("** Can't convert the active model. !switch to another model first. **")
+        return
+    elif (model_info := manager.model_info(model_name_or_path)):
         if 'weights' in model_info:
             ckpt_path = Path(model_info['weights'])
             original_config_file = Path(model_info['config'])
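This guard refuses to convert the checkpoint that is currently loaded and asks the user to `!switch` first. A minimal, self-contained sketch of that branching follows; `FakeModelManager` and this trimmed `optimize_model` are hypothetical stand-ins for the real model manager and command.

```python
# Sketch of the active-model guard added in optimize_model above;
# FakeModelManager is a hypothetical stand-in, only the branching mirrors the diff.
from pathlib import Path

class FakeModelManager:
    def __init__(self, models):
        self._models = models

    def model_info(self, name):
        return self._models.get(name)

def optimize_model(model_name_or_path, active_model_name, manager):
    if model_name_or_path == active_model_name:
        print("** Can't convert the active model. !switch to another model first. **")
        return None
    elif (model_info := manager.model_info(model_name_or_path)):
        if 'weights' in model_info:
            return Path(model_info['weights'])
    return None

manager = FakeModelManager({'sd-1.5': {'weights': 'models/sd-1.5.ckpt'}})
optimize_model('sd-1.5', active_model_name='sd-1.5', manager=manager)         # refused
print(optimize_model('sd-1.5', active_model_name='sd-2.1', manager=manager))  # models/sd-1.5.ckpt
```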
@@ -914,6 +913,7 @@ def do_postprocess (gen, opt, callback):
         codeformer_fidelity = opt.codeformer_fidelity,
         save_original = opt.save_original,
         upscale = opt.upscale,
+        upscale_denoise_str = opt.esrgan_denoise_str,
         out_direction = opt.out_direction,
         outcrop = opt.outcrop,
         callback = callback,
@@ -975,7 +975,7 @@ def prepare_image_metadata(
         print(f'** The filename format contains an unknown key \'{e.args[0]}\'. Will use {{prefix}}.{{seed}}.png\' instead')
         filename = f'{prefix}.{seed}.png'
     except IndexError:
-        print(f'** The filename format is broken or complete. Will use \'{{prefix}}.{{seed}}.png\' instead')
+        print("** The filename format is broken or complete. Will use '{prefix}.{seed}.png' instead")
         filename = f'{prefix}.{seed}.png'
     if opt.variation_amount > 0: