From d589ad96aac873e273435e1271628c488d15254a Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 10 Feb 2023 15:06:37 -0500 Subject: [PATCH 1/4] fix two bugs in conversion of inpaint models from ckpt to diffusers models - If CLI asked to convert the currently loaded model, the model would crash on the first rendering. CLI will now refuse to convert a model loaded in memory (probably a good idea in any case). - CLI will offer the `v1-inpainting-inference.yaml` as the configuration file when importing an inpainting a .ckpt or .safetensors file that has "inpainting" in the name. Otherwise it offers `v1-inference.yaml` as the default. --- ldm/invoke/CLI.py | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py index fd61c7c8bf..2c204bb33f 100644 --- a/ldm/invoke/CLI.py +++ b/ldm/invoke/CLI.py @@ -58,12 +58,9 @@ def main(): print(f'>> Internet connectivity is {Globals.internet_available}') if not args.conf: - if not os.path.exists(os.path.join(Globals.root,'configs','models.yaml')): - report_model_error(opt, e) - # print(f"\n** Error. 
The file {os.path.join(Globals.root,'configs','models.yaml')} could not be found.")
-            # print('** Please check the location of your invokeai directory and use the --root_dir option to point to the correct path.')
-            # print('** This script will now exit.')
-            # sys.exit(-1)
+        config_file = os.path.join(Globals.root,'configs','models.yaml')
+        if not os.path.exists(config_file):
+            report_model_error(opt, FileNotFoundError(f"The file {config_file} could not be found."))
 
     print(f'>> {ldm.invoke.__app_name__}, version {ldm.invoke.__version__}')
     print(f'>> InvokeAI runtime directory is "{Globals.root}"')
@@ -658,7 +655,9 @@ def import_ckpt_model(path_or_url: Union[Path, str], gen, opt, completer) -> Opt
         model_description=default_description
     )
     config_file = None
-    default = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
+    default = Path(Globals.root,'configs/stable-diffusion/v1-inpainting-inference.yaml') \
+        if re.search('inpaint',default_name, flags=re.IGNORECASE) \
+        else Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
 
     completer.complete_extensions(('.yaml','.yml'))
     completer.set_line(str(default))
@@ -709,12 +708,21 @@ def _get_model_name_and_desc(model_manager,completer,model_name:str='',model_des
         model_description = input(f'Description for this model [{model_description}]: ').strip() or model_description
     return model_name, model_description
 
-def optimize_model(model_name_or_path:str, gen, opt, completer):
+def _is_inpainting(model_name_or_path: str)->bool:
+    if re.search('inpaint',model_name_or_path, flags=re.IGNORECASE):
+        return not input('Is this an inpainting model? [y] ').startswith(('n','N'))
+    else:
+        return input('Is this an inpainting model? [n] ').startswith(('y','Y'))
+
+def optimize_model(model_name_or_path: str, gen, opt, completer):
     manager = gen.model_manager
     ckpt_path = None
     original_config_file = None
-    if (model_info := manager.model_info(model_name_or_path)):
+    if model_name_or_path == gen.model_name:
+        print("** Can't convert the active model. !switch to another model first. **")
+        return
+    elif (model_info := manager.model_info(model_name_or_path)):
         if 'weights' in model_info:
             ckpt_path = Path(model_info['weights'])
             original_config_file = Path(model_info['config'])
@@ -731,7 +739,7 @@ def optimize_model(model_name_or_path:str, gen, opt, completer):
             ckpt_path.stem,
             f'Converted model {ckpt_path.stem}'
         )
-        is_inpainting = input('Is this an inpainting model? [n] ').startswith(('y','Y'))
+        is_inpainting = _is_inpainting(model_name_or_path)
         original_config_file = Path(
             'configs',
             'stable-diffusion',
@@ -950,7 +958,7 @@ def prepare_image_metadata(
         print(f'** The filename format contains an unknown key \'{e.args[0]}\'. Will use {{prefix}}.{{seed}}.png\' instead')
         filename = f'{prefix}.{seed}.png'
     except IndexError:
-        print(f'** The filename format is broken or complete. Will use \'{{prefix}}.{{seed}}.png\' instead')
+        print("** The filename format is broken or incomplete. 
Will use '{prefix}.{seed}.png' instead") filename = f'{prefix}.{seed}.png' if opt.variation_amount > 0: From 106b3aea1b1b4a7b605c69568eaaac0aa86c73a3 Mon Sep 17 00:00:00 2001 From: blhook <89283782+blhook@users.noreply.github.com> Date: Sat, 11 Feb 2023 00:29:44 -0800 Subject: [PATCH 2/4] Fix incorrect Windows env activation location Change broken link to Contributing inside of Developer Install Minor format modification to allow for numbered list to appear properly --- docs/installation/020_INSTALL_MANUAL.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/docs/installation/020_INSTALL_MANUAL.md b/docs/installation/020_INSTALL_MANUAL.md index cf9063729f..711df0f8f9 100644 --- a/docs/installation/020_INSTALL_MANUAL.md +++ b/docs/installation/020_INSTALL_MANUAL.md @@ -125,7 +125,7 @@ manager, please follow these steps: === "Windows" ```ps - .venv\script\activate + .venv\Scripts\activate ``` If you get a permissions error at this point, run this command and try again @@ -295,13 +295,12 @@ on your system, please see the [Git Installation Guide](https://github.com/git-guides/install-git) 1. From the command line, run this command: - ```bash git clone https://github.com/invoke-ai/InvokeAI.git ``` -This will create a directory named `InvokeAI` and populate it with the -full source code from the InvokeAI repository. + This will create a directory named `InvokeAI` and populate it with the + full source code from the InvokeAI repository. 2. Activate the InvokeAI virtual environment as per step (4) of the manual installation protocol (important!) @@ -342,7 +341,7 @@ installation protocol (important!) repository. You can then use GitHub functions to create and submit pull requests to contribute improvements to the project. - Please see [Contributing](/index.md#Contributing) for hints + Please see [Contributing](../index.md#contributing) for hints on getting started. 
### Unsupported Conda Install From c00155f6a49e54b023363d083a0781936101903e Mon Sep 17 00:00:00 2001 From: tyler Date: Fri, 10 Feb 2023 18:54:37 -0600 Subject: [PATCH 3/4] pulling esrgan denoise strength through to the generate API. --- ldm/generate.py | 7 ++++++- ldm/invoke/CLI.py | 1 + 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ldm/generate.py b/ldm/generate.py index ca05478823..c242826f03 100644 --- a/ldm/generate.py +++ b/ldm/generate.py @@ -321,6 +321,7 @@ class Generate: codeformer_fidelity = None, save_original = False, upscale = None, + upscale_denoise_str = 0.75, # this is specific to inpainting and causes more extreme inpainting inpaint_replace = 0.0, # This controls the size at which inpaint occurs (scaled up for inpaint, then back down for the result) @@ -560,6 +561,7 @@ class Generate: if upscale is not None or facetool_strength > 0: self.upscale_and_reconstruct(results, upscale = upscale, + upscale_denoise_str = upscale_denoise_str, facetool = facetool, strength = facetool_strength, codeformer_fidelity = codeformer_fidelity, @@ -633,6 +635,7 @@ class Generate: facetool_strength = 0.0, codeformer_fidelity = 0.75, upscale = None, + upscale_denoise_str = 0.75, out_direction = None, outcrop = [], save_original = True, # to get new name @@ -684,6 +687,7 @@ class Generate: codeformer_fidelity = codeformer_fidelity, save_original = save_original, upscale = upscale, + upscale_denoise_str = upscale_denoise_str, image_callback = callback, prefix = prefix, ) @@ -952,6 +956,7 @@ class Generate: image_list, facetool = 'gfpgan', upscale = None, + upscale_denoise_str = 0.75, strength = 0.0, codeformer_fidelity = 0.75, save_original = False, @@ -982,7 +987,7 @@ class Generate: if len(upscale) < 2: upscale.append(0.75) image = self.esrgan.process( - image, upscale[1], seed, int(upscale[0])) + image, upscale[1], seed, int(upscale[0]), upscale_denoise_str=upscale_denoise_str) else: print(">> ESRGAN is disabled. 
Image not upscaled.") except Exception as e: diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py index 2c204bb33f..32c6d816be 100644 --- a/ldm/invoke/CLI.py +++ b/ldm/invoke/CLI.py @@ -897,6 +897,7 @@ def do_postprocess (gen, opt, callback): codeformer_fidelity = opt.codeformer_fidelity, save_original = opt.save_original, upscale = opt.upscale, + upscale_denoise_str = opt.esrgan_denoise_str, out_direction = opt.out_direction, outcrop = opt.outcrop, callback = callback, From d3c850104bd6a369f9d5a5668691dd24d9e775a4 Mon Sep 17 00:00:00 2001 From: tyler Date: Fri, 10 Feb 2023 20:16:08 -0600 Subject: [PATCH 4/4] pulling esrgan denoise strength through to the generate API. --- ldm/generate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldm/generate.py b/ldm/generate.py index c242826f03..32a6a929a8 100644 --- a/ldm/generate.py +++ b/ldm/generate.py @@ -987,7 +987,7 @@ class Generate: if len(upscale) < 2: upscale.append(0.75) image = self.esrgan.process( - image, upscale[1], seed, int(upscale[0]), upscale_denoise_str=upscale_denoise_str) + image, upscale[1], seed, int(upscale[0]), denoise_str=upscale_denoise_str) else: print(">> ESRGAN is disabled. Image not upscaled.") except Exception as e: