Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
a few adjustments

- fix unused variables and f-strings found by pyflakes
- use global_converted_ckpts_dir() to find location of diffusers
- fixed bug in model_manager that was causing the description of converted models to read "Optimized version of {model_name}"
This commit is contained in:
parent b87f7b1129
commit e561d19206
@@ -28,7 +28,7 @@ from ldm.invoke.args import Args, APP_ID, APP_VERSION, calculate_init_img_hash
 from ldm.invoke.conditioning import get_tokens_for_prompt, get_prompt_structure
 from ldm.invoke.generator.diffusers_pipeline import PipelineIntermediateState
 from ldm.invoke.generator.inpaint import infill_methods
-from ldm.invoke.globals import Globals
+from ldm.invoke.globals import Globals, global_converted_ckpts_dir
 from ldm.invoke.pngwriter import PngWriter, retrieve_metadata
 from ldm.invoke.prompt_parser import split_weighted_subprompts, Blend

@@ -294,7 +294,7 @@ class InvokeAIWebServer:
     def load_socketio_listeners(self, socketio):
         @socketio.on("requestSystemConfig")
         def handle_request_capabilities():
-            print(f">> System config requested")
+            print(">> System config requested")
             config = self.get_system_config()
             config["model_list"] = self.generate.model_manager.list_models()
             config["infill_methods"] = infill_methods()
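
The `f` prefix on a string that contains no `{}` placeholders does nothing at runtime, and pyflakes flags it; the hunk above simply drops the prefix. A minimal, self-contained sketch of the before/after (the function names are invented for illustration, not taken from the web server):

def log_request_before():
    # Flagged by pyflakes: the f prefix is inert because there is nothing to interpolate
    print(f">> System config requested")

def log_request_after():
    # Plain string literal: identical output, no lint warning
    print(">> System config requested")

log_request_before()
log_request_after()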
@@ -428,7 +428,7 @@ class InvokeAIWebServer:
             )

             if model_to_convert['save_location'] == 'root':
-                diffusers_path = Path(Globals.root, 'models', 'converted_ckpts', f'{model_name}_diffusers')
+                diffusers_path = Path(global_converted_ckpts_dir(), f'{model_name}_diffusers')

             if model_to_convert['save_location'] == 'custom' and model_to_convert['custom_location'] is not None:
                 diffusers_path = Path(model_to_convert['custom_location'], f'{model_name}_diffusers')
@@ -737,7 +737,7 @@ class InvokeAIWebServer:

             try:
                 seed = original_image["metadata"]["image"]["seed"]
-            except (KeyError) as e:
+            except KeyError:
                 seed = "unknown_seed"
                 pass

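
Binding the exception to a name that is never used is exactly the kind of unused variable pyflakes reports, and the parentheses around a single exception class are redundant; the hunk above drops both. A hedged sketch with hypothetical data, not the server's actual metadata dictionary:

# Hypothetical stand-in for the image metadata being inspected.
original_image = {"metadata": {"image": {}}}

# Before: 'e' is assigned but never used, and (KeyError) is just KeyError
try:
    seed = original_image["metadata"]["image"]["seed"]
except (KeyError) as e:
    seed = "unknown_seed"

# After: same behaviour, nothing left for pyflakes to flag
try:
    seed = original_image["metadata"]["image"]["seed"]
except KeyError:
    seed = "unknown_seed"

print(seed)  # "unknown_seed", since the hypothetical metadata has no seed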
@@ -837,7 +837,7 @@ class InvokeAIWebServer:

         @socketio.on("cancel")
         def handle_cancel():
-            print(f">> Cancel processing requested")
+            print(">> Cancel processing requested")
             self.canceled.set()

             # TODO: I think this needs a safety mechanism.
@@ -919,9 +919,6 @@ class InvokeAIWebServer:
             So we need to convert each into a PIL Image.
             """

-            truncated_outpaint_image_b64 = generation_parameters["init_img"][:64]
-            truncated_outpaint_mask_b64 = generation_parameters["init_mask"][:64]
-
             init_img_url = generation_parameters["init_img"]

             original_bounding_box = generation_parameters["bounding_box"].copy(
@@ -1096,7 +1093,6 @@ class InvokeAIWebServer:
             nonlocal facetool_parameters
             nonlocal progress

-            step_index = 1
             nonlocal prior_variations

             """
@@ -1518,7 +1514,7 @@ class InvokeAIWebServer:
         if step_index:
             filename += f".{step_index}"
         if postprocessing:
-            filename += f".postprocessed"
+            filename += ".postprocessed"

         filename += ".png"

|
@@ -33,7 +33,7 @@ Globals.models_file = 'models.yaml'
 Globals.models_dir = 'models'
 Globals.config_dir = 'configs'
 Globals.autoscan_dir = 'weights'
-Globals.converted_ckpts_dir = 'converted-ckpts'
+Globals.converted_ckpts_dir = 'converted_ckpts'

 # Try loading patchmatch
 Globals.try_patchmatch = True
@@ -66,6 +66,9 @@ def global_models_dir()->Path:
 def global_autoscan_dir()->Path:
     return Path(Globals.root, Globals.autoscan_dir)

+def global_converted_ckpts_dir()->Path:
+    return Path(global_models_dir(), Globals.converted_ckpts_dir)
+
 def global_set_root(root_dir:Union[str,Path]):
     Globals.root = root_dir

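
Together with the rename to `converted_ckpts` above, the new `global_converted_ckpts_dir()` helper gives callers one place that knows where converted checkpoints live, instead of hand-building `Path(Globals.root, 'models', 'converted_ckpts')` as the web server did before. A rough sketch of how the pieces compose, using a simplified stand-in for the real `Globals` object in ldm.invoke.globals and an invented install root:

from pathlib import Path

class Globals:                        # simplified stand-in, not the real implementation
    root = "/home/user/invokeai"      # hypothetical install root
    models_dir = "models"
    converted_ckpts_dir = "converted_ckpts"

def global_models_dir() -> Path:
    return Path(Globals.root, Globals.models_dir)

def global_converted_ckpts_dir() -> Path:
    return Path(global_models_dir(), Globals.converted_ckpts_dir)

# The web server's 'root' save location now reduces to one call:
model_name = "some-model"             # hypothetical model name
diffusers_path = Path(global_converted_ckpts_dir(), f"{model_name}_diffusers")
print(diffusers_path)                 # /home/user/invokeai/models/converted_ckpts/some-model_diffusers

If the converted-checkpoints location ever moves again, only globals.py has to change.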
|
@@ -759,7 +759,7 @@ class ModelManager(object):
             return

         model_name = model_name or diffusers_path.name
-        model_description = model_description or "Optimized version of {model_name}"
+        model_description = model_description or f"Optimized version of {model_name}"
         print(f">> Optimizing {model_name} (30-60s)")
         try:
             # By passing the specified VAE too the conversion function, the autoencoder
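
Without the `f` prefix, the braces are never interpolated, so every converted model was described literally as 'Optimized version of {model_name}'; that is the bug named in the commit message. A small illustration with a hypothetical function, not the actual ModelManager method:

def describe(model_name, model_description=None):
    # Before: plain string, the braces are kept verbatim
    return model_description or "Optimized version of {model_name}"

def describe_fixed(model_name, model_description=None):
    # After: f-string interpolates the converted model's name
    return model_description or f"Optimized version of {model_name}"

print(describe("stable-diffusion-1.5"))        # Optimized version of {model_name}
print(describe_fixed("stable-diffusion-1.5"))  # Optimized version of stable-diffusion-1.5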