From e561d19206875e6bfd24cbd859871b7cc571dcb9 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 12 Feb 2023 17:20:13 -0500 Subject: [PATCH] a few adjustments - fix unused variables and f-strings found by pyflakes - use global_converted_ckpts_dir() to find location of diffusers - fixed bug in model_manager that was causing the description of converted models to read "Optimized version of {model_name}" --- invokeai/backend/invoke_ai_web_server.py | 16 ++++++---------- ldm/invoke/globals.py | 5 ++++- ldm/invoke/model_manager.py | 2 +- 3 files changed, 11 insertions(+), 12 deletions(-) diff --git a/invokeai/backend/invoke_ai_web_server.py b/invokeai/backend/invoke_ai_web_server.py index dbe2714ade..292206ee61 100644 --- a/invokeai/backend/invoke_ai_web_server.py +++ b/invokeai/backend/invoke_ai_web_server.py @@ -28,7 +28,7 @@ from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash from ldm.invoke.conditioning import get_tokens_for_prompt, get_prompt_structure from ldm.invoke.generator.diffusers_pipeline import PipelineIntermediateState from ldm.invoke.generator.inpaint import infill_methods -from ldm.invoke.globals import Globals +from ldm.invoke.globals import Globals, global_converted_ckpts_dir from ldm.invoke.pngwriter import PngWriter, retrieve_metadata from ldm.invoke.prompt_parser import split_weighted_subprompts, Blend @@ -294,7 +294,7 @@ class InvokeAIWebServer: def load_socketio_listeners(self, socketio): @socketio.on("requestSystemConfig") def handle_request_capabilities(): - print(f">> System config requested") + print(">> System config requested") config = self.get_system_config() config["model_list"] = self.generate.model_manager.list_models() config["infill_methods"] = infill_methods() @@ -428,7 +428,7 @@ class InvokeAIWebServer: ) if model_to_convert['save_location'] == 'root': - diffusers_path = Path(Globals.root, 'models', 'converted_ckpts', f'{model_name}_diffusers') + diffusers_path = Path(global_converted_ckpts_dir(), 
f'{model_name}_diffusers') if model_to_convert['save_location'] == 'custom' and model_to_convert['custom_location'] is not None: diffusers_path = Path(model_to_convert['custom_location'], f'{model_name}_diffusers') @@ -737,7 +737,7 @@ class InvokeAIWebServer: try: seed = original_image["metadata"]["image"]["seed"] - except (KeyError) as e: + except KeyError: seed = "unknown_seed" pass @@ -837,7 +837,7 @@ class InvokeAIWebServer: @socketio.on("cancel") def handle_cancel(): - print(f">> Cancel processing requested") + print(">> Cancel processing requested") self.canceled.set() # TODO: I think this needs a safety mechanism. @@ -919,9 +919,6 @@ class InvokeAIWebServer: So we need to convert each into a PIL Image. """ - truncated_outpaint_image_b64 = generation_parameters["init_img"][:64] - truncated_outpaint_mask_b64 = generation_parameters["init_mask"][:64] - init_img_url = generation_parameters["init_img"] original_bounding_box = generation_parameters["bounding_box"].copy( @@ -1096,7 +1093,6 @@ class InvokeAIWebServer: nonlocal facetool_parameters nonlocal progress - step_index = 1 nonlocal prior_variations """ @@ -1518,7 +1514,7 @@ class InvokeAIWebServer: if step_index: filename += f".{step_index}" if postprocessing: - filename += f".postprocessed" + filename += ".postprocessed" filename += ".png" diff --git a/ldm/invoke/globals.py b/ldm/invoke/globals.py index 6bfa0ecd9d..7016e8b902 100644 --- a/ldm/invoke/globals.py +++ b/ldm/invoke/globals.py @@ -33,7 +33,7 @@ Globals.models_file = 'models.yaml' Globals.models_dir = 'models' Globals.config_dir = 'configs' Globals.autoscan_dir = 'weights' -Globals.converted_ckpts_dir = 'converted-ckpts' +Globals.converted_ckpts_dir = 'converted_ckpts' # Try loading patchmatch Globals.try_patchmatch = True @@ -66,6 +66,9 @@ def global_models_dir()->Path: def global_autoscan_dir()->Path: return Path(Globals.root, Globals.autoscan_dir) +def global_converted_ckpts_dir()->Path: + return Path(global_models_dir(), 
Globals.converted_ckpts_dir) + def global_set_root(root_dir:Union[str,Path]): Globals.root = root_dir diff --git a/ldm/invoke/model_manager.py b/ldm/invoke/model_manager.py index 0e7a0747ab..99e2bdfd86 100644 --- a/ldm/invoke/model_manager.py +++ b/ldm/invoke/model_manager.py @@ -759,7 +759,7 @@ class ModelManager(object): return model_name = model_name or diffusers_path.name - model_description = model_description or "Optimized version of {model_name}" + model_description = model_description or f"Optimized version of {model_name}" print(f">> Optimizing {model_name} (30-60s)") try: # By passing the specified VAE too the conversion function, the autoencoder