change "ialog" to "log"

Lincoln Stein 2023-04-11 18:48:20 -04:00
parent f3081e7013
commit c132dbdefa
2 changed files with 136 additions and 136 deletions
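The whole commit is a mechanical rename of the logging import alias from ialog to log in the two files below; no logging behavior or call signatures change. A minimal sketch of the before/after pattern, assuming an environment where the invokeai.backend.util.logging module named in the diff is importable (the example message is illustrative, not a line from either file):

    # before this commit: the backend logging module was imported under the alias "ialog"
    import invokeai.backend.util.logging as ialog
    ialog.info("loading model")

    # after this commit: the same module is aliased as "log" and every call site is updated
    import invokeai.backend.util.logging as log
    log.info("loading model")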

View File

@@ -24,7 +24,7 @@ import safetensors
 import safetensors.torch
 import torch
 import transformers
-import invokeai.backend.util.logging as ialog
+import invokeai.backend.util.logging as log
 from diffusers import (
     AutoencoderKL,
     UNet2DConditionModel,
@@ -133,7 +133,7 @@ class ModelManager(object):
         )
         if not self.valid_model(model_name):
-            ialog.error(
+            log.error(
                 f'"{model_name}" is not a known model name. Please check your models.yaml file'
             )
             return self.current_model
@@ -145,7 +145,7 @@ class ModelManager(object):
         if model_name in self.models:
             requested_model = self.models[model_name]["model"]
-            ialog.info(f"Retrieving model {model_name} from system RAM cache")
+            log.info(f"Retrieving model {model_name} from system RAM cache")
             requested_model.ready()
             width = self.models[model_name]["width"]
             height = self.models[model_name]["height"]
@@ -380,7 +380,7 @@ class ModelManager(object):
         """
         omega = self.config
         if model_name not in omega:
-            ialog.error(f"Unknown model {model_name}")
+            log.error(f"Unknown model {model_name}")
             return
         # save these for use in deletion later
         conf = omega[model_name]
@@ -393,13 +393,13 @@ class ModelManager(object):
             self.stack.remove(model_name)
         if delete_files:
             if weights:
-                ialog.info(f"Deleting file {weights}")
+                log.info(f"Deleting file {weights}")
                 Path(weights).unlink(missing_ok=True)
             elif path:
-                ialog.info(f"Deleting directory {path}")
+                log.info(f"Deleting directory {path}")
                 rmtree(path, ignore_errors=True)
             elif repo_id:
-                ialog.info(f"Deleting the cached model directory for {repo_id}")
+                log.info(f"Deleting the cached model directory for {repo_id}")
                 self._delete_model_from_cache(repo_id)

     def add_model(
@@ -440,7 +440,7 @@ class ModelManager(object):
     def _load_model(self, model_name: str):
         """Load and initialize the model from configuration variables passed at object creation time"""
         if model_name not in self.config:
-            ialog.error(
+            log.error(
                 f'"{model_name}" is not a known model name. Please check your models.yaml file'
             )
             return
@@ -458,7 +458,7 @@ class ModelManager(object):
         model_format = mconfig.get("format", "ckpt")
         if model_format == "ckpt":
             weights = mconfig.weights
-            ialog.info(f"Loading {model_name} from {weights}")
+            log.info(f"Loading {model_name} from {weights}")
             model, width, height, model_hash = self._load_ckpt_model(
                 model_name, mconfig
             )
@@ -474,13 +474,13 @@ class ModelManager(object):
         # usage statistics
         toc = time.time()
-        ialog.info("Model loaded in " + "%4.2fs" % (toc - tic))
+        log.info("Model loaded in " + "%4.2fs" % (toc - tic))
         if self._has_cuda():
-            ialog.info(
+            log.info(
                 "Max VRAM used to load the model: "+
                 "%4.2fG" % (torch.cuda.max_memory_allocated() / 1e9)
             )
-            ialog.info(
+            log.info(
                 "Current VRAM usage: "+
                 "%4.2fG" % (torch.cuda.memory_allocated() / 1e9)
             )
@@ -490,11 +490,11 @@ class ModelManager(object):
         name_or_path = self.model_name_or_path(mconfig)
         using_fp16 = self.precision == "float16"
-        ialog.info(f"Loading diffusers model from {name_or_path}")
+        log.info(f"Loading diffusers model from {name_or_path}")
         if using_fp16:
-            ialog.debug("Using faster float16 precision")
+            log.debug("Using faster float16 precision")
         else:
-            ialog.debug("Using more accurate float32 precision")
+            log.debug("Using more accurate float32 precision")
         # TODO: scan weights maybe?
         pipeline_args: dict[str, Any] = dict(
@@ -526,7 +526,7 @@ class ModelManager(object):
                 if str(e).startswith("fp16 is not a valid"):
                     pass
                 else:
-                    ialog.error(
+                    log.error(
                         f"An unexpected error occurred while downloading the model: {e})"
                     )
             if pipeline:
@@ -545,7 +545,7 @@ class ModelManager(object):
         # square images???
         width = pipeline.unet.config.sample_size * pipeline.vae_scale_factor
         height = width
-        ialog.debug(f"Default image dimensions = {width} x {height}")
+        log.debug(f"Default image dimensions = {width} x {height}")
         return pipeline, width, height, model_hash
@@ -562,7 +562,7 @@ class ModelManager(object):
         weights = os.path.normpath(os.path.join(Globals.root, weights))
         # Convert to diffusers and return a diffusers pipeline
-        ialog.info(f"Converting legacy checkpoint {model_name} into a diffusers model...")
+        log.info(f"Converting legacy checkpoint {model_name} into a diffusers model...")
         from . import load_pipeline_from_original_stable_diffusion_ckpt
@@ -627,7 +627,7 @@ class ModelManager(object):
         if model_name not in self.models:
             return
-        ialog.info(f"Offloading {model_name} to CPU")
+        log.info(f"Offloading {model_name} to CPU")
         model = self.models[model_name]["model"]
         model.offload_all()
         self.current_model = None
@@ -643,26 +643,26 @@ class ModelManager(object):
         and option to exit if an infected file is identified.
         """
         # scan model
-        ialog.debug(f"Scanning Model: {model_name}")
+        log.debug(f"Scanning Model: {model_name}")
         scan_result = scan_file_path(checkpoint)
         if scan_result.infected_files != 0:
             if scan_result.infected_files == 1:
-                ialog.critical(f"Issues Found In Model: {scan_result.issues_count}")
-                ialog.critical("The model you are trying to load seems to be infected.")
-                ialog.critical("For your safety, InvokeAI will not load this model.")
-                ialog.critical("Please use checkpoints from trusted sources.")
-                ialog.critical("Exiting InvokeAI")
+                log.critical(f"Issues Found In Model: {scan_result.issues_count}")
+                log.critical("The model you are trying to load seems to be infected.")
+                log.critical("For your safety, InvokeAI will not load this model.")
+                log.critical("Please use checkpoints from trusted sources.")
+                log.critical("Exiting InvokeAI")
                 sys.exit()
             else:
-                ialog.warning("InvokeAI was unable to scan the model you are using.")
+                log.warning("InvokeAI was unable to scan the model you are using.")
                 model_safe_check_fail = ask_user(
                     "Do you want to to continue loading the model?", ["y", "n"]
                 )
                 if model_safe_check_fail.lower() != "y":
-                    ialog.critical("Exiting InvokeAI")
+                    log.critical("Exiting InvokeAI")
                     sys.exit()
         else:
-            ialog.debug("Model scanned ok")
+            log.debug("Model scanned ok")

     def import_diffuser_model(
         self,
@@ -779,24 +779,24 @@ class ModelManager(object):
         model_path: Path = None
         thing = path_url_or_repo # to save typing
-        ialog.info(f"Probing {thing} for import")
+        log.info(f"Probing {thing} for import")
         if thing.startswith(("http:", "https:", "ftp:")):
-            ialog.info(f"{thing} appears to be a URL")
+            log.info(f"{thing} appears to be a URL")
             model_path = self._resolve_path(
                 thing, "models/ldm/stable-diffusion-v1"
             ) # _resolve_path does a download if needed
         elif Path(thing).is_file() and thing.endswith((".ckpt", ".safetensors")):
             if Path(thing).stem in ["model", "diffusion_pytorch_model"]:
-                ialog.debug(f"{Path(thing).name} appears to be part of a diffusers model. Skipping import")
+                log.debug(f"{Path(thing).name} appears to be part of a diffusers model. Skipping import")
                 return
             else:
-                ialog.debug(f"{thing} appears to be a checkpoint file on disk")
+                log.debug(f"{thing} appears to be a checkpoint file on disk")
                 model_path = self._resolve_path(thing, "models/ldm/stable-diffusion-v1")
         elif Path(thing).is_dir() and Path(thing, "model_index.json").exists():
-            ialog.debug(f"{thing} appears to be a diffusers file on disk")
+            log.debug(f"{thing} appears to be a diffusers file on disk")
             model_name = self.import_diffuser_model(
                 thing,
                 vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
@@ -807,30 +807,30 @@ class ModelManager(object):
         elif Path(thing).is_dir():
             if (Path(thing) / "model_index.json").exists():
-                ialog.debug(f"{thing} appears to be a diffusers model.")
+                log.debug(f"{thing} appears to be a diffusers model.")
                 model_name = self.import_diffuser_model(
                     thing, commit_to_conf=commit_to_conf
                 )
             else:
-                ialog.debug(f"{thing} appears to be a directory. Will scan for models to import")
+                log.debug(f"{thing} appears to be a directory. Will scan for models to import")
                 for m in list(Path(thing).rglob("*.ckpt")) + list(
                     Path(thing).rglob("*.safetensors")
                 ):
                     if model_name := self.heuristic_import(
                         str(m), commit_to_conf=commit_to_conf
                     ):
-                        ialog.info(f"{model_name} successfully imported")
+                        log.info(f"{model_name} successfully imported")
                 return model_name
         elif re.match(r"^[\w.+-]+/[\w.+-]+$", thing):
-            ialog.debug(f"{thing} appears to be a HuggingFace diffusers repo_id")
+            log.debug(f"{thing} appears to be a HuggingFace diffusers repo_id")
             model_name = self.import_diffuser_model(
                 thing, commit_to_conf=commit_to_conf
             )
             pipeline, _, _, _ = self._load_diffusers_model(self.config[model_name])
             return model_name
         else:
-            ialog.warning(f"{thing}: Unknown thing. Please provide a URL, file path, directory or HuggingFace repo_id")
+            log.warning(f"{thing}: Unknown thing. Please provide a URL, file path, directory or HuggingFace repo_id")
         # Model_path is set in the event of a legacy checkpoint file.
         # If not set, we're all done
@@ -838,7 +838,7 @@ class ModelManager(object):
             return
         if model_path.stem in self.config: # already imported
-            ialog.debug("Already imported. Skipping")
+            log.debug("Already imported. Skipping")
             return model_path.stem
         # another round of heuristics to guess the correct config file.
@@ -854,38 +854,38 @@ class ModelManager(object):
             # look for a like-named .yaml file in same directory
             if model_path.with_suffix(".yaml").exists():
                 model_config_file = model_path.with_suffix(".yaml")
-                ialog.debug(f"Using config file {model_config_file.name}")
+                log.debug(f"Using config file {model_config_file.name}")
             else:
                 model_type = self.probe_model_type(checkpoint)
                 if model_type == SDLegacyType.V1:
-                    ialog.debug("SD-v1 model detected")
+                    log.debug("SD-v1 model detected")
                     model_config_file = Path(
                         Globals.root, "configs/stable-diffusion/v1-inference.yaml"
                     )
                 elif model_type == SDLegacyType.V1_INPAINT:
-                    ialog.debug("SD-v1 inpainting model detected")
+                    log.debug("SD-v1 inpainting model detected")
                     model_config_file = Path(
                         Globals.root,
                         "configs/stable-diffusion/v1-inpainting-inference.yaml",
                     )
                 elif model_type == SDLegacyType.V2_v:
-                    ialog.debug("SD-v2-v model detected")
+                    log.debug("SD-v2-v model detected")
                     model_config_file = Path(
                         Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
                     )
                 elif model_type == SDLegacyType.V2_e:
-                    ialog.debug("SD-v2-e model detected")
+                    log.debug("SD-v2-e model detected")
                     model_config_file = Path(
                         Globals.root, "configs/stable-diffusion/v2-inference.yaml"
                     )
                 elif model_type == SDLegacyType.V2:
-                    ialog.warning(
+                    log.warning(
                         f"{thing} is a V2 checkpoint file, but its parameterization cannot be determined. Please provide configuration file path."
                     )
                     return
                 else:
-                    ialog.warning(
+                    log.warning(
                         f"{thing} is a legacy checkpoint file but not a known Stable Diffusion model. Please provide configuration file path."
                     )
                     return
@@ -902,7 +902,7 @@ class ModelManager(object):
         for suffix in ["pt", "ckpt", "safetensors"]:
             if (model_path.with_suffix(f".vae.{suffix}")).exists():
                 vae_path = model_path.with_suffix(f".vae.{suffix}")
-                ialog.debug(f"Using VAE file {vae_path.name}")
+                log.debug(f"Using VAE file {vae_path.name}")
         vae = None if vae_path else dict(repo_id="stabilityai/sd-vae-ft-mse")
         diffuser_path = Path(
@@ -948,14 +948,14 @@ class ModelManager(object):
         from . import convert_ckpt_to_diffusers
         if diffusers_path.exists():
-            ialog.error(
+            log.error(
                 f"The path {str(diffusers_path)} already exists. Please move or remove it and try again."
             )
             return
         model_name = model_name or diffusers_path.name
         model_description = model_description or f"Converted version of {model_name}"
-        ialog.debug(f"Converting {model_name} to diffusers (30-60s)")
+        log.debug(f"Converting {model_name} to diffusers (30-60s)")
         try:
             # By passing the specified VAE to the conversion function, the autoencoder
             # will be built into the model rather than tacked on afterward via the config file
@@ -972,10 +972,10 @@ class ModelManager(object):
                 vae_path=vae_path,
                 scan_needed=scan_needed,
             )
-            ialog.debug(
+            log.debug(
                 f"Success. Converted model is now located at {str(diffusers_path)}"
             )
-            ialog.debug(f"Writing new config file entry for {model_name}")
+            log.debug(f"Writing new config file entry for {model_name}")
             new_config = dict(
                 path=str(diffusers_path),
                 description=model_description,
@@ -986,17 +986,17 @@ class ModelManager(object):
             self.add_model(model_name, new_config, True)
             if commit_to_conf:
                 self.commit(commit_to_conf)
-            ialog.debug("Conversion succeeded")
+            log.debug("Conversion succeeded")
         except Exception as e:
-            ialog.warning(f"Conversion failed: {str(e)}")
-            ialog.warning(
+            log.warning(f"Conversion failed: {str(e)}")
+            log.warning(
                 "If you are trying to convert an inpainting or 2.X model, please indicate the correct config file (e.g. v1-inpainting-inference.yaml)"
             )
         return model_name

     def search_models(self, search_folder):
-        ialog.info(f"Finding Models In: {search_folder}")
+        log.info(f"Finding Models In: {search_folder}")
         models_folder_ckpt = Path(search_folder).glob("**/*.ckpt")
         models_folder_safetensors = Path(search_folder).glob("**/*.safetensors")
@@ -1020,7 +1020,7 @@ class ModelManager(object):
         num_loaded_models = len(self.models)
         if num_loaded_models >= self.max_loaded_models:
             least_recent_model = self._pop_oldest_model()
-            ialog.info(
+            log.info(
                 f"Cache limit (max={self.max_loaded_models}) reached. Purging {least_recent_model}"
             )
             if least_recent_model is not None:
@@ -1029,7 +1029,7 @@ class ModelManager(object):
     def print_vram_usage(self) -> None:
        if self._has_cuda:
-            ialog.info(
+            log.info(
                 "Current VRAM usage:"+
                 "%4.2fG" % (torch.cuda.memory_allocated() / 1e9),
             )
@@ -1119,10 +1119,10 @@ class ModelManager(object):
             dest = hub / model.stem
             if dest.exists() and not source.exists():
                 continue
-            ialog.info(f"{source} => {dest}")
+            log.info(f"{source} => {dest}")
             if source.exists():
                 if dest.is_symlink():
-                    ialog.warning(f"Found symlink at {dest.name}. Not migrating.")
+                    log.warning(f"Found symlink at {dest.name}. Not migrating.")
                 elif dest.exists():
                     if source.is_dir():
                         rmtree(source)
@@ -1139,7 +1139,7 @@ class ModelManager(object):
         ]
         for d in empty:
             os.rmdir(d)
-        ialog.info("Migration is done. Continuing...")
+        log.info("Migration is done. Continuing...")

     def _resolve_path(
         self, source: Union[str, Path], dest_directory: str
@@ -1182,14 +1182,14 @@ class ModelManager(object):
     def _add_embeddings_to_model(self, model: StableDiffusionGeneratorPipeline):
         if self.embedding_path is not None:
-            ialog.info(f"Loading embeddings from {self.embedding_path}")
+            log.info(f"Loading embeddings from {self.embedding_path}")
             for root, _, files in os.walk(self.embedding_path):
                 for name in files:
                     ti_path = os.path.join(root, name)
                     model.textual_inversion_manager.load_textual_inversion(
                         ti_path, defer_injecting_tokens=True
                     )
-            ialog.info(
+            log.info(
                 f'Textual inversion triggers: {", ".join(sorted(model.textual_inversion_manager.get_all_trigger_strings()))}'
             )
@@ -1212,7 +1212,7 @@ class ModelManager(object):
             with open(hashpath) as f:
                 hash = f.read()
             return hash
-        ialog.debug("Calculating sha256 hash of model files")
+        log.debug("Calculating sha256 hash of model files")
         tic = time.time()
         sha = hashlib.sha256()
         count = 0
@@ -1224,7 +1224,7 @@ class ModelManager(object):
                 sha.update(chunk)
         hash = sha.hexdigest()
         toc = time.time()
-        ialog.debug(f"sha256 = {hash} ({count} files hashed in", "%4.2fs)" % (toc - tic))
+        log.debug(f"sha256 = {hash} ({count} files hashed in", "%4.2fs)" % (toc - tic))
         with open(hashpath, "w") as f:
             f.write(hash)
         return hash
@@ -1242,13 +1242,13 @@ class ModelManager(object):
                 hash = f.read()
             return hash
-        ialog.debug("Calculating sha256 hash of weights file")
+        log.debug("Calculating sha256 hash of weights file")
         tic = time.time()
         sha = hashlib.sha256()
         sha.update(data)
         hash = sha.hexdigest()
         toc = time.time()
-        ialog.debug(f"sha256 = {hash} "+"(%4.2fs)" % (toc - tic))
+        log.debug(f"sha256 = {hash} "+"(%4.2fs)" % (toc - tic))
         with open(hashpath, "w") as f:
             f.write(hash)
@@ -1269,12 +1269,12 @@ class ModelManager(object):
             local_files_only=not Globals.internet_available,
         )
-        ialog.debug(f"Loading diffusers VAE from {name_or_path}")
+        log.debug(f"Loading diffusers VAE from {name_or_path}")
         if using_fp16:
             vae_args.update(torch_dtype=torch.float16)
             fp_args_list = [{"revision": "fp16"}, {}]
         else:
-            ialog.debug("Using more accurate float32 precision")
+            log.debug("Using more accurate float32 precision")
             fp_args_list = [{}]
         vae = None
@@ -1298,7 +1298,7 @@ class ModelManager(object):
                 break
         if not vae and deferred_error:
-            ialog.warning(f"Could not load VAE {name_or_path}: {str(deferred_error)}")
+            log.warning(f"Could not load VAE {name_or_path}: {str(deferred_error)}")
         return vae
@@ -1314,7 +1314,7 @@ class ModelManager(object):
                 for revision in repo.revisions:
                     hashes_to_delete.add(revision.commit_hash)
         strategy = cache_info.delete_revisions(*hashes_to_delete)
-        ialog.warning(
+        log.warning(
             f"Deletion of this model is expected to free {strategy.expected_freed_size_str}"
         )
         strategy.execute()

View File

@@ -16,7 +16,7 @@ if sys.platform == "darwin":
 import pyparsing # type: ignore
 import invokeai.version as invokeai
-import invokeai.backend.util.logging as ialog
+import invokeai.backend.util.logging as log
 from ...backend import Generate, ModelManager
 from ...backend.args import Args, dream_cmd_from_png, metadata_dumps, metadata_from_png
@@ -70,7 +70,7 @@ def main():
     # run any post-install patches needed
     run_patches()
-    ialog.info(f"Internet connectivity is {Globals.internet_available}")
+    log.info(f"Internet connectivity is {Globals.internet_available}")
     if not args.conf:
         config_file = os.path.join(Globals.root, "configs", "models.yaml")
@@ -79,8 +79,8 @@ def main():
             opt, FileNotFoundError(f"The file {config_file} could not be found.")
         )
-    ialog.info(f"{invokeai.__app_name__}, version {invokeai.__version__}")
-    ialog.info(f'InvokeAI runtime directory is "{Globals.root}"')
+    log.info(f"{invokeai.__app_name__}, version {invokeai.__version__}")
+    log.info(f'InvokeAI runtime directory is "{Globals.root}"')
     # loading here to avoid long delays on startup
     # these two lines prevent a horrible warning message from appearing
@@ -122,7 +122,7 @@ def main():
         else:
             raise FileNotFoundError(f"{opt.infile} not found.")
     except (FileNotFoundError, IOError) as e:
-        ialog.critical('Aborted',exc_info=True)
+        log.critical('Aborted',exc_info=True)
         sys.exit(-1)
     # creating a Generate object:
@@ -144,11 +144,11 @@ def main():
     except (FileNotFoundError, TypeError, AssertionError) as e:
         report_model_error(opt, e)
     except (IOError, KeyError):
-        ialog.critical("Aborted",exc_info=True)
+        log.critical("Aborted",exc_info=True)
         sys.exit(-1)
     if opt.seamless:
-        ialog.info("Changed to seamless tiling mode")
+        log.info("Changed to seamless tiling mode")
     # preload the model
     try:
@@ -181,7 +181,7 @@ def main():
             f'\nGoodbye!\nYou can start InvokeAI again by running the "invoke.bat" (or "invoke.sh") script from {Globals.root}'
         )
     except Exception:
-        ialog.error("An error occurred",exc_info=True)
+        log.error("An error occurred",exc_info=True)

 # TODO: main_loop() has gotten busy. Needs to be refactored.
 def main_loop(gen, opt):
@@ -247,7 +247,7 @@ def main_loop(gen, opt):
                 if not opt.prompt:
                     oldargs = metadata_from_png(opt.init_img)
                     opt.prompt = oldargs.prompt
-                    ialog.info(f'Retrieved old prompt "{opt.prompt}" from {opt.init_img}')
+                    log.info(f'Retrieved old prompt "{opt.prompt}" from {opt.init_img}')
             except (OSError, AttributeError, KeyError):
                 pass
@@ -264,9 +264,9 @@ def main_loop(gen, opt):
         if opt.init_img is not None and re.match("^-\\d+$", opt.init_img):
             try:
                 opt.init_img = last_results[int(opt.init_img)][0]
-                ialog.info(f"Reusing previous image {opt.init_img}")
+                log.info(f"Reusing previous image {opt.init_img}")
             except IndexError:
-                ialog.info(f"No previous initial image at position {opt.init_img} found")
+                log.info(f"No previous initial image at position {opt.init_img} found")
                 opt.init_img = None
                 continue
@@ -287,9 +287,9 @@ def main_loop(gen, opt):
         if opt.seed is not None and opt.seed < 0 and operation != "postprocess":
             try:
                 opt.seed = last_results[opt.seed][1]
-                ialog.info(f"Reusing previous seed {opt.seed}")
+                log.info(f"Reusing previous seed {opt.seed}")
             except IndexError:
-                ialog.info(f"No previous seed at position {opt.seed} found")
+                log.info(f"No previous seed at position {opt.seed} found")
                 opt.seed = None
                 continue
@@ -308,7 +308,7 @@ def main_loop(gen, opt):
             subdir = subdir[: (path_max - 39 - len(os.path.abspath(opt.outdir)))]
             current_outdir = os.path.join(opt.outdir, subdir)
-            ialog.info('Writing files to directory: "' + current_outdir + '"')
+            log.info('Writing files to directory: "' + current_outdir + '"')
             # make sure the output directory exists
             if not os.path.exists(current_outdir):
@@ -438,13 +438,13 @@ def main_loop(gen, opt):
                     **vars(opt),
                 )
             except (PromptParser.ParsingException, pyparsing.ParseException):
-                ialog.error("An error occurred while processing your prompt",exc_info=True)
+                log.error("An error occurred while processing your prompt",exc_info=True)
         elif operation == "postprocess":
-            ialog.info(f"fixing {opt.prompt}")
+            log.info(f"fixing {opt.prompt}")
             opt.last_operation = do_postprocess(gen, opt, image_writer)
         elif operation == "mask":
-            ialog.info(f"generating masks from {opt.prompt}")
+            log.info(f"generating masks from {opt.prompt}")
             do_textmask(gen, opt, image_writer)
         if opt.grid and len(grid_images) > 0:
@@ -468,11 +468,11 @@ def main_loop(gen, opt):
                 results = [[path, formatted_dream_prompt]]
         except AssertionError:
-            ialog.error(e)
+            log.error(e)
             continue
         except OSError as e:
-            ialog.error(e)
+            log.error(e)
             continue
         print("Outputs:")
@@ -511,7 +511,7 @@ def do_command(command: str, gen, opt: Args, completer) -> tuple:
             gen.set_model(model_name)
             add_embedding_terms(gen, completer)
         except KeyError as e:
-            ialog.error(e)
+            log.error(e)
         except Exception as e:
             report_model_error(opt, e)
         completer.add_history(command)
@@ -525,7 +525,7 @@ def do_command(command: str, gen, opt: Args, completer) -> tuple:
     elif command.startswith("!import"):
         path = shlex.split(command)
         if len(path) < 2:
-            ialog.warning(
+            log.warning(
                 "please provide (1) a URL to a .ckpt file to import; (2) a local path to a .ckpt file; or (3) a diffusers repository id in the form stabilityai/stable-diffusion-2-1"
             )
         else:
@@ -539,7 +539,7 @@ def do_command(command: str, gen, opt: Args, completer) -> tuple:
     elif command.startswith(("!convert", "!optimize")):
         path = shlex.split(command)
         if len(path) < 2:
-            ialog.warning("please provide the path to a .ckpt or .safetensors model")
+            log.warning("please provide the path to a .ckpt or .safetensors model")
         else:
             try:
                 convert_model(path[1], gen, opt, completer)
@@ -551,7 +551,7 @@ def do_command(command: str, gen, opt: Args, completer) -> tuple:
     elif command.startswith("!edit"):
         path = shlex.split(command)
         if len(path) < 2:
-            ialog.warning("please provide the name of a model")
+            log.warning("please provide the name of a model")
         else:
             edit_model(path[1], gen, opt, completer)
         completer.add_history(command)
@@ -560,7 +560,7 @@ def do_command(command: str, gen, opt: Args, completer) -> tuple:
     elif command.startswith("!del"):
         path = shlex.split(command)
         if len(path) < 2:
-            ialog.warning("please provide the name of a model")
+            log.warning("please provide the name of a model")
         else:
             del_config(path[1], gen, opt, completer)
         completer.add_history(command)
@@ -641,7 +641,7 @@ def import_model(model_path: str, gen, opt, completer):
             default_name = url_attachment_name(model_path)
             default_name = Path(default_name).stem
         except Exception:
-            ialog.warning(f"A problem occurred while assigning the name of the downloaded model",exc_info=True)
+            log.warning(f"A problem occurred while assigning the name of the downloaded model",exc_info=True)
     model_name, model_desc = _get_model_name_and_desc(
         gen.model_manager,
         completer,
@@ -662,11 +662,11 @@ def import_model(model_path: str, gen, opt, completer):
         model_config_file=config_file,
     )
     if not imported_name:
-        ialog.error("Aborting import.")
+        log.error("Aborting import.")
         return
     if not _verify_load(imported_name, gen):
-        ialog.error("model failed to load. Discarding configuration entry")
+        log.error("model failed to load. Discarding configuration entry")
         gen.model_manager.del_model(imported_name)
         return
     if click.confirm("Make this the default model?", default=False):
@@ -674,7 +674,7 @@ def import_model(model_path: str, gen, opt, completer):
     gen.model_manager.commit(opt.conf)
     completer.update_models(gen.model_manager.list_models())
-    ialog.info(f"{imported_name} successfully installed")
+    log.info(f"{imported_name} successfully installed")

 def _pick_configuration_file(completer)->Path:
     print(
@@ -718,21 +718,21 @@ Please select the type of this model:
     return choice

 def _verify_load(model_name: str, gen) -> bool:
-    ialog.info("Verifying that new model loads...")
+    log.info("Verifying that new model loads...")
     current_model = gen.model_name
     try:
         if not gen.set_model(model_name):
             return
     except Exception as e:
-        ialog.warning(f"model failed to load: {str(e)}")
-        ialog.warning(
+        log.warning(f"model failed to load: {str(e)}")
+        log.warning(
             "** note that importing 2.X checkpoints is not supported. Please use !convert_model instead."
         )
         return False
     if click.confirm("Keep model loaded?", default=True):
         gen.set_model(model_name)
     else:
-        ialog.info("Restoring previous model")
+        log.info("Restoring previous model")
         gen.set_model(current_model)
     return True
@@ -755,7 +755,7 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
     ckpt_path = None
     original_config_file = None
     if model_name_or_path == gen.model_name:
-        ialog.warning("Can't convert the active model. !switch to another model first. **")
+        log.warning("Can't convert the active model. !switch to another model first. **")
         return
     elif model_info := manager.model_info(model_name_or_path):
         if "weights" in model_info:
@@ -765,7 +765,7 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
             model_description = model_info["description"]
             vae_path = model_info.get("vae")
     else:
-        ialog.warning(f"{model_name_or_path} is not a legacy .ckpt weights file")
+        log.warning(f"{model_name_or_path} is not a legacy .ckpt weights file")
         return
     model_name = manager.convert_and_import(
         ckpt_path,
@@ -786,16 +786,16 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
     manager.commit(opt.conf)
     if click.confirm(f"Delete the original .ckpt file at {ckpt_path}?", default=False):
         ckpt_path.unlink(missing_ok=True)
-        ialog.warning(f"{ckpt_path} deleted")
+        log.warning(f"{ckpt_path} deleted")

 def del_config(model_name: str, gen, opt, completer):
     current_model = gen.model_name
     if model_name == current_model:
-        ialog.warning("Can't delete active model. !switch to another model first. **")
+        log.warning("Can't delete active model. !switch to another model first. **")
         return
     if model_name not in gen.model_manager.config:
-        ialog.warning(f"Unknown model {model_name}")
+        log.warning(f"Unknown model {model_name}")
         return
     if not click.confirm(
@@ -808,17 +808,17 @@ def del_config(model_name: str, gen, opt, completer):
     )
     gen.model_manager.del_model(model_name, delete_files=delete_completely)
     gen.model_manager.commit(opt.conf)
-    ialog.warning(f"{model_name} deleted")
+    log.warning(f"{model_name} deleted")
     completer.update_models(gen.model_manager.list_models())

 def edit_model(model_name: str, gen, opt, completer):
     manager = gen.model_manager
     if not (info := manager.model_info(model_name)):
-        ialog.warning(f"** Unknown model {model_name}")
+        log.warning(f"** Unknown model {model_name}")
         return
     print()
-    ialog.info(f"Editing model {model_name} from configuration file {opt.conf}")
+    log.info(f"Editing model {model_name} from configuration file {opt.conf}")
     new_name = _get_model_name(manager.list_models(), completer, model_name)
     for attribute in info.keys():
@@ -856,7 +856,7 @@ def edit_model(model_name: str, gen, opt, completer):
         manager.set_default_model(new_name)
     manager.commit(opt.conf)
     completer.update_models(manager.list_models())
-    ialog.info("Model successfully updated")
+    log.info("Model successfully updated")

 def _get_model_name(existing_names, completer, default_name: str = "") -> str:
@@ -867,11 +867,11 @@ def _get_model_name(existing_names, completer, default_name: str = "") -> str:
         if len(model_name) == 0:
             model_name = default_name
         if not re.match("^[\w._+:/-]+$", model_name):
-            ialog.warning(
+            log.warning(
                 'model name must contain only words, digits and the characters "._+:/-" **'
             )
         elif model_name != default_name and model_name in existing_names:
-            ialog.warning(f"the name {model_name} is already in use. Pick another.")
+            log.warning(f"the name {model_name} is already in use. Pick another.")
         else:
             done = True
     return model_name
@@ -938,10 +938,10 @@ def do_postprocess(gen, opt, callback):
             opt=opt,
         )
     except OSError:
-        ialog.error(f"{file_path}: file could not be read",exc_info=True)
+        log.error(f"{file_path}: file could not be read",exc_info=True)
         return
     except (KeyError, AttributeError):
-        ialog.error(f"an error occurred while applying the {tool} postprocessor",exc_info=True)
+        log.error(f"an error occurred while applying the {tool} postprocessor",exc_info=True)
         return
     return opt.last_operation
@@ -996,12 +996,12 @@ def prepare_image_metadata(
         try:
             filename = opt.fnformat.format(**wildcards)
         except KeyError as e:
-            ialog.error(
+            log.error(
                 f"The filename format contains an unknown key '{e.args[0]}'. Will use {{prefix}}.{{seed}}.png' instead"
             )
             filename = f"{prefix}.{seed}.png"
         except IndexError:
-            ialog.error(
+            log.error(
                 "The filename format is broken or complete. Will use '{prefix}.{seed}.png' instead"
             )
             filename = f"{prefix}.{seed}.png"
@@ -1091,14 +1091,14 @@ def split_variations(variations_string) -> list:
     for part in variations_string.split(","):
         seed_and_weight = part.split(":")
         if len(seed_and_weight) != 2:
-            ialog.warning(f'Could not parse with_variation part "{part}"')
+            log.warning(f'Could not parse with_variation part "{part}"')
             broken = True
             break
         try:
             seed = int(seed_and_weight[0])
             weight = float(seed_and_weight[1])
         except ValueError:
-            ialog.warning(f'Could not parse with_variation part "{part}"')
+            log.warning(f'Could not parse with_variation part "{part}"')
             broken = True
             break
         parts.append([seed, weight])
@@ -1122,23 +1122,23 @@ def load_face_restoration(opt):
                     opt.gfpgan_model_path
                 )
             else:
-                ialog.info("Face restoration disabled")
+                log.info("Face restoration disabled")
             if opt.esrgan:
                 esrgan = restoration.load_esrgan(opt.esrgan_bg_tile)
             else:
-                ialog.info("Upscaling disabled")
+                log.info("Upscaling disabled")
         else:
-            ialog.info("Face restoration and upscaling disabled")
+            log.info("Face restoration and upscaling disabled")
     except (ModuleNotFoundError, ImportError):
         print(traceback.format_exc(), file=sys.stderr)
-        ialog.info("You may need to install the ESRGAN and/or GFPGAN modules")
+        log.info("You may need to install the ESRGAN and/or GFPGAN modules")
     return gfpgan, codeformer, esrgan

 def make_step_callback(gen, opt, prefix):
     destination = os.path.join(opt.outdir, "intermediates", prefix)
     os.makedirs(destination, exist_ok=True)
-    ialog.info(f"Intermediate images will be written into {destination}")
+    log.info(f"Intermediate images will be written into {destination}")

     def callback(state: PipelineIntermediateState):
         latents = state.latents
@@ -1180,11 +1180,11 @@ def retrieve_dream_command(opt, command, completer):
     try:
         cmd = dream_cmd_from_png(path)
     except OSError:
-        ialog.error(f"{tokens[0]}: file could not be read")
+        log.error(f"{tokens[0]}: file could not be read")
     except (KeyError, AttributeError, IndexError):
-        ialog.error(f"{tokens[0]}: file has no metadata")
+        log.error(f"{tokens[0]}: file has no metadata")
     except:
-        ialog.error(f"{tokens[0]}: file could not be processed")
+        log.error(f"{tokens[0]}: file could not be processed")
     if len(cmd) > 0:
         completer.set_line(cmd)
@@ -1193,7 +1193,7 @@ def write_commands(opt, file_path: str, outfilepath: str):
     try:
         paths = sorted(list(Path(dir).glob(basename)))
     except ValueError:
-        ialog.error(f'"{basename}": unacceptable pattern')
+        log.error(f'"{basename}": unacceptable pattern')
         return
     commands = []
@@ -1202,9 +1202,9 @@ def write_commands(opt, file_path: str, outfilepath: str):
         try:
             cmd = dream_cmd_from_png(path)
         except (KeyError, AttributeError, IndexError):
-            ialog.error(f"{path}: file has no metadata")
+            log.error(f"{path}: file has no metadata")
         except:
-            ialog.error(f"{path}: file could not be processed")
+            log.error(f"{path}: file could not be processed")
         if cmd:
             commands.append(f"# {path}")
             commands.append(cmd)
@@ -1214,17 +1214,17 @@ def write_commands(opt, file_path: str, outfilepath: str):
         outfilepath = os.path.join(opt.outdir, basename)
     with open(outfilepath, "w", encoding="utf-8") as f:
         f.write("\n".join(commands))
-    ialog.info(f"File {outfilepath} with commands created")
+    log.info(f"File {outfilepath} with commands created")

 def report_model_error(opt: Namespace, e: Exception):
-    ialog.warning(f'An error occurred while attempting to initialize the model: "{str(e)}"')
-    ialog.warning(
+    log.warning(f'An error occurred while attempting to initialize the model: "{str(e)}"')
+    log.warning(
         "This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models."
     )
     yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE")
     if yes_to_all:
-        ialog.warning(
+        log.warning(
             "Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE"
         )
     else:
@@ -1234,7 +1234,7 @@ def report_model_error(opt: Namespace, e: Exception):
         ):
             return
-    ialog.info("invokeai-configure is launching....\n")
+    log.info("invokeai-configure is launching....\n")
     # Match arguments that were set on the CLI
     # only the arguments accepted by the configuration script are parsed
@@ -1251,7 +1251,7 @@ def report_model_error(opt: Namespace, e: Exception):
     from ..install import invokeai_configure
     invokeai_configure()
-    ialog.warning("InvokeAI will now restart")
+    log.warning("InvokeAI will now restart")
     sys.argv = previous_args
     main() # would rather do a os.exec(), but doesn't exist?
     sys.exit(0)