diff --git a/docs/installation/020_INSTALL_MANUAL.md b/docs/installation/020_INSTALL_MANUAL.md
index cf9063729f..711df0f8f9 100644
--- a/docs/installation/020_INSTALL_MANUAL.md
+++ b/docs/installation/020_INSTALL_MANUAL.md
@@ -125,7 +125,7 @@ manager, please follow these steps:
     === "Windows"
 
         ```ps
-        .venv\script\activate
+        .venv\Scripts\activate
         ```
 
     If you get a permissions error at this point, run this command and try again
@@ -295,13 +295,12 @@ on your system, please see the
 [Git Installation Guide](https://github.com/git-guides/install-git)
 
 1. From the command line, run this command:
-
    ```bash
    git clone https://github.com/invoke-ai/InvokeAI.git
    ```
 
-This will create a directory named `InvokeAI` and populate it with the
-full source code from the InvokeAI repository.
+   This will create a directory named `InvokeAI` and populate it with the
+   full source code from the InvokeAI repository.
 
 2. Activate the InvokeAI virtual environment as per step (4) of the manual
 installation protocol (important!)
@@ -342,7 +341,7 @@ installation protocol (important!)
 repository. You can then use GitHub functions to create and submit pull
 requests to contribute improvements to the project.
 
-   Please see [Contributing](/index.md#Contributing) for hints
+   Please see [Contributing](../index.md#contributing) for hints
    on getting started.
 
 ### Unsupported Conda Install
diff --git a/ldm/generate.py b/ldm/generate.py
index ca05478823..32a6a929a8 100644
--- a/ldm/generate.py
+++ b/ldm/generate.py
@@ -321,6 +321,7 @@ class Generate:
             codeformer_fidelity = None,
             save_original = False,
             upscale = None,
+            upscale_denoise_str = 0.75,
             # this is specific to inpainting and causes more extreme inpainting
             inpaint_replace = 0.0,
             # This controls the size at which inpaint occurs (scaled up for inpaint, then back down for the result)
@@ -560,6 +561,7 @@ class Generate:
             if upscale is not None or facetool_strength > 0:
                 self.upscale_and_reconstruct(results,
                                              upscale = upscale,
+                                             upscale_denoise_str = upscale_denoise_str,
                                              facetool = facetool,
                                              strength = facetool_strength,
                                              codeformer_fidelity = codeformer_fidelity,
@@ -633,6 +635,7 @@ class Generate:
             facetool_strength = 0.0,
             codeformer_fidelity = 0.75,
             upscale = None,
+            upscale_denoise_str = 0.75,
             out_direction = None,
             outcrop = [],
             save_original = True, # to get new name
@@ -684,6 +687,7 @@ class Generate:
                 codeformer_fidelity = codeformer_fidelity,
                 save_original = save_original,
                 upscale = upscale,
+                upscale_denoise_str = upscale_denoise_str,
                 image_callback = callback,
                 prefix = prefix,
             )
@@ -952,6 +956,7 @@ class Generate:
                                 image_list,
                                 facetool = 'gfpgan',
                                 upscale = None,
+                                upscale_denoise_str = 0.75,
                                 strength = 0.0,
                                 codeformer_fidelity = 0.75,
                                 save_original = False,
@@ -982,7 +987,7 @@ class Generate:
                     if len(upscale) < 2:
                         upscale.append(0.75)
                     image = self.esrgan.process(
-                        image, upscale[1], seed, int(upscale[0]))
+                        image, upscale[1], seed, int(upscale[0]), denoise_str=upscale_denoise_str)
                 else:
                     print(">> ESRGAN is disabled. Image not upscaled.")
             except Exception as e:
diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py
index 6d776e7dcb..0c01586176 100644
--- a/ldm/invoke/CLI.py
+++ b/ldm/invoke/CLI.py
@@ -58,12 +58,9 @@ def main():
     print(f'>> Internet connectivity is {Globals.internet_available}')
 
     if not args.conf:
-        if not os.path.exists(os.path.join(Globals.root,'configs','models.yaml')):
-            report_model_error(opt, e)
-        # print(f"\n** Error. The file {os.path.join(Globals.root,'configs','models.yaml')} could not be found.")
-        # print('** Please check the location of your invokeai directory and use the --root_dir option to point to the correct path.')
-        # print('** This script will now exit.')
-        # sys.exit(-1)
+        config_file = os.path.join(Globals.root,'configs','models.yaml')
+        if not os.path.exists(config_file):
+            report_model_error(opt, FileNotFoundError(f"The file {config_file} could not be found."))
 
     print(f'>> {ldm.invoke.__app_name__}, version {ldm.invoke.__version__}')
     print(f'>> InvokeAI runtime directory is "{Globals.root}"')
@@ -659,7 +656,6 @@ def import_ckpt_model(path_or_url: Union[Path, str], gen, opt, completer) -> Opt
     )
     if not (config_file := _ask_for_config_file(path_or_url, completer)):
         return
-    completer.complete_extensions(('.ckpt','.safetensors'))
 
     vae = None
     default = Path(Globals.root,'models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt')
@@ -742,7 +738,10 @@ def optimize_model(model_name_or_path:str, gen, opt, completer):
     ckpt_path = None
     original_config_file = None
 
-    if (model_info := manager.model_info(model_name_or_path)):
+    if model_name_or_path == gen.model_name:
+        print("** Can't convert the active model. !switch to another model first. **")
+        return
+    elif (model_info := manager.model_info(model_name_or_path)):
         if 'weights' in model_info:
             ckpt_path = Path(model_info['weights'])
             original_config_file = Path(model_info['config'])
@@ -914,6 +913,7 @@ def do_postprocess (gen, opt, callback):
         codeformer_fidelity = opt.codeformer_fidelity,
         save_original = opt.save_original,
         upscale = opt.upscale,
+        upscale_denoise_str = opt.esrgan_denoise_str,
         out_direction = opt.out_direction,
         outcrop = opt.outcrop,
         callback = callback,
@@ -975,7 +975,7 @@ def prepare_image_metadata(
         print(f'** The filename format contains an unknown key \'{e.args[0]}\'. Will use {{prefix}}.{{seed}}.png\' instead')
         filename = f'{prefix}.{seed}.png'
     except IndexError:
-        print(f'** The filename format is broken or complete. Will use \'{{prefix}}.{{seed}}.png\' instead')
+        print("** The filename format is broken or complete. Will use '{prefix}.{seed}.png' instead")
         filename = f'{prefix}.{seed}.png'
 
     if opt.variation_amount > 0:
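
For reviewers, a minimal sketch (not part of the diff) of how the new `upscale_denoise_str` keyword threads through the calls changed above: `prompt2image()` forwards it to `upscale_and_reconstruct()`, which hands it to `esrgan.process()` as `denoise_str`. The prompt, upscale settings, and output path below are illustrative assumptions made up for this example.

```python
# Illustrative sketch only: exercises the upscale_denoise_str keyword that
# this diff plumbs from prompt2image() down to esrgan.process(denoise_str=...).
# The prompt and output filename are assumptions, not taken from the diff.
from ldm.generate import Generate

gen = Generate()  # loads the locally configured default model

results = gen.prompt2image(
    prompt='a lighthouse at dawn',
    upscale=[2, 0.9],             # 2x ESRGAN upscale at strength 0.9
    upscale_denoise_str=0.5,      # new knob: lower values keep more grain
)
for image, seed in results:
    image.save(f'lighthouse.{seed}.png')
```

Note that in the running app the CLI is what wires an ESRGAN instance into `Generate`; if `self.esrgan` is unset, the branch shown in the diff simply prints ">> ESRGAN is disabled. Image not upscaled."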