replaced remaining print statements with log.*()

Lincoln Stein 2023-04-18 20:49:00 -04:00
parent 0b0e6fe448
commit b164330e3c
13 changed files with 82 additions and 73 deletions
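
The change is mechanical across all 13 files: drop the hand-rolled ">> " / "** " severity prefixes and route each message through the logging module at the matching level. A minimal before/after sketch of the pattern (illustrative, assembled from call sites below rather than copied from any single file; model_name is assumed to be in scope):

    # before: severity encoded in an ad-hoc string prefix
    print(f">> Checking for model {model_name}...")
    print("** Not enough room for the user interface. Try making this window larger.")

    # after: import the logging module and let it supply prefix and severity
    import invokeai.backend.util.logging as log
    log.info(f"Checking for model {model_name}...")
    log.error("Not enough room for the user interface. Try making this window larger.")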

View File

@@ -3,8 +3,9 @@
import os
from argparse import Namespace
import invokeai.backend.util.logging as log
from ..services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
from ...backend import Globals
from ..services.model_manager_initializer import get_model_manager
from ..services.restoration_services import RestorationServices
@@ -47,8 +48,7 @@ class ApiDependencies:
Globals.disable_xformers = not config.xformers
Globals.ckpt_convert = config.ckpt_convert
# TODO: Use a logger
print(f">> Internet connectivity is {Globals.internet_available}")
log.info(f"Internet connectivity is {Globals.internet_available}")
events = FastAPIEventService(event_handler_id)

View File

@@ -4,6 +4,7 @@ import shutil
import asyncio
from typing import Annotated, Any, List, Literal, Optional, Union
import invokeai.backend.util.logging as log
from fastapi.routing import APIRouter, HTTPException
from pydantic import BaseModel, Field, parse_obj_as
from pathlib import Path
@@ -115,16 +116,16 @@ async def delete_model(model_name: str) -> None:
model_exists = model_name in model_names
# check if model exists
print(f">> Checking for model {model_name}...")
log.info(f"Checking for model {model_name}...")
if model_exists:
print(f">> Deleting Model: {model_name}")
log.info(f"Deleting Model: {model_name}")
ApiDependencies.invoker.services.model_manager.del_model(model_name, delete_files=True)
print(f">> Model Deleted: {model_name}")
log.info(f"Model Deleted: {model_name}")
raise HTTPException(status_code=204, detail=f"Model '{model_name}' deleted successfully")
else:
print(f">> Model not found")
log.error(f"Model not found")
raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found")
@@ -248,4 +249,4 @@ async def delete_model(model_name: str) -> None:
# )
# print(f">> Models Merged: {models_to_merge}")
# print(f">> New Model Added: {model_merge_info['merged_model_name']}")
# except Exception as e:

View File

@@ -7,6 +7,7 @@ from pydantic import BaseModel, Field
import networkx as nx
import matplotlib.pyplot as plt
import invokeai.backend.util.logging as log
from ..models.image import ImageField
from ..services.graph import GraphExecutionState
from ..services.invoker import Invoker
@@ -183,7 +184,7 @@ class HistoryCommand(BaseCommand):
for i in range(min(self.count, len(history))):
entry_id = history[-1 - i]
entry = context.get_session().graph.get_node(entry_id)
print(f"{entry_id}: {get_invocation_command(entry)}")
log.info(f"{entry_id}: {get_invocation_command(entry)}")
class SetDefaultCommand(BaseCommand):

View File

@@ -10,6 +10,7 @@ import shlex
from pathlib import Path
from typing import List, Dict, Literal, get_args, get_type_hints, get_origin
import invokeai.backend.util.logging as log
from ...backend import ModelManager, Globals
from ..invocations.baseinvocation import BaseInvocation
from .commands import BaseCommand
@@ -160,8 +161,8 @@ def set_autocompleter(model_manager: ModelManager) -> Completer:
pass
except OSError: # file likely corrupted
newname = f"{histfile}.old"
print(
f"## Your history file {histfile} couldn't be loaded and may be corrupted. Renaming it to {newname}"
log.error(
f"Your history file {histfile} couldn't be loaded and may be corrupted. Renaming it to {newname}"
)
histfile.replace(Path(newname))
atexit.register(readline.write_history_file, histfile)

View File

@@ -13,6 +13,7 @@ from typing import (
from pydantic import BaseModel
from pydantic.fields import Field
import invokeai.backend.util.logging as log
from .services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
from ..backend import Args
@@ -125,7 +126,7 @@ def invoke_all(context: CliContext):
# Print any errors
if context.session.has_error():
for n in context.session.errors:
print(
log.error(
f"Error in node {n} (source node {context.session.prepared_source_mapping[n]}): {context.session.errors[n]}"
)
@@ -279,12 +280,12 @@ def invoke_cli():
invoke_all(context)
except InvalidArgs:
print('Invalid command, use "help" to list commands')
log.warning('Invalid command, use "help" to list commands')
continue
except SessionError:
# Start a new session
print("Session error: creating a new session")
log.warning("Session error: creating a new session")
context.session = context.invoker.create_execution_state()
except ExitCli:

View File

@@ -1,3 +1,4 @@
import invokeai.backend.util.logging as log
from invokeai.app.invocations.baseinvocation import InvocationContext
from invokeai.backend.model_management.model_manager import ModelManager
@@ -7,5 +8,5 @@ def choose_model(model_manager: ModelManager, model_name: str):
if model_manager.valid_model(model_name):
return model_manager.get_model(model_name)
else:
print(f"* Warning: '{model_name}' is not a valid model name. Using default model instead.")
return model_manager.get_model()
log.warning(f"'{model_name}' is not a valid model name. Using default model instead.")
return model_manager.get_model()

View File

@@ -7,6 +7,7 @@ from omegaconf import OmegaConf
from pathlib import Path
import invokeai.version
import invokeai.backend.util.logging as log
from ...backend import ModelManager
from ...backend.util import choose_precision, choose_torch_device
from ...backend import Globals
@@ -20,8 +21,8 @@ def get_model_manager(config: Args) -> ModelManager:
config, FileNotFoundError(f"The file {config_file} could not be found.")
)
print(f">> {invokeai.version.__app_name__}, version {invokeai.version.__version__}")
print(f'>> InvokeAI runtime directory is "{Globals.root}"')
log.info(f"{invokeai.version.__app_name__}, version {invokeai.version.__version__}")
log.info(f'InvokeAI runtime directory is "{Globals.root}"')
# these two lines prevent a horrible warning message from appearing
# when the frozen CLIP tokenizer is imported
@@ -66,7 +67,7 @@ def get_model_manager(config: Args) -> ModelManager:
except (FileNotFoundError, TypeError, AssertionError) as e:
report_model_error(config, e)
except (IOError, KeyError) as e:
print(f"{e}. Aborting.")
log.error(f"{e}. Aborting.")
sys.exit(-1)
# try to autoconvert new models
@@ -80,14 +81,14 @@ def get_model_manager(config: Args) -> ModelManager:
return model_manager
def report_model_error(opt: Namespace, e: Exception):
print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
print(
"** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models."
log.error(f'An error occurred while attempting to initialize the model: "{str(e)}"')
log.error(
"This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models."
)
yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE")
if yes_to_all:
print(
"** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE"
log.warning(
"Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE"
)
else:
response = input(
@@ -96,7 +97,7 @@ def report_model_error(opt: Namespace, e: Exception):
if response.startswith(("n", "N")):
return
print("invokeai-configure is launching....\n")
log.info("invokeai-configure is launching....\n")
# Match arguments that were set on the CLI
# only the arguments accepted by the configuration script are parsed

View File

@@ -1,6 +1,7 @@
import sys
import traceback
import torch
import invokeai.backend.util.logging as log
from ...backend.restoration import Restoration
from ...backend.util import choose_torch_device, CPU_DEVICE, MPS_DEVICE
@@ -20,16 +21,16 @@ class RestorationServices:
args.gfpgan_model_path
)
else:
print(">> Face restoration disabled")
log.info("Face restoration disabled")
if args.esrgan:
esrgan = restoration.load_esrgan(args.esrgan_bg_tile)
else:
print(">> Upscaling disabled")
log.info("Upscaling disabled")
else:
print(">> Face restoration and upscaling disabled")
log.info("Face restoration and upscaling disabled")
except (ModuleNotFoundError, ImportError):
print(traceback.format_exc(), file=sys.stderr)
print(">> You may need to install the ESRGAN and/or GFPGAN modules")
log.info("You may need to install the ESRGAN and/or GFPGAN modules")
self.device = torch.device(choose_torch_device())
self.gfpgan = gfpgan
self.codeformer = codeformer
@@ -58,15 +59,15 @@ class RestorationServices:
if self.gfpgan is not None or self.codeformer is not None:
if facetool == "gfpgan":
if self.gfpgan is None:
print(
">> GFPGAN not found. Face restoration is disabled."
log.info(
"GFPGAN not found. Face restoration is disabled."
)
else:
image = self.gfpgan.process(image, strength, seed)
if facetool == "codeformer":
if self.codeformer is None:
print(
">> CodeFormer not found. Face restoration is disabled."
log.info(
"CodeFormer not found. Face restoration is disabled."
)
else:
cf_device = (
@@ -80,7 +81,7 @@
fidelity=codeformer_fidelity,
)
else:
print(">> Face Restoration is disabled.")
log.info("Face Restoration is disabled.")
if upscale is not None:
if self.esrgan is not None:
if len(upscale) < 2:
@@ -93,10 +94,10 @@
denoise_str=upscale_denoise_str,
)
else:
print(">> ESRGAN is disabled. Image not upscaled.")
log.info("ESRGAN is disabled. Image not upscaled.")
except Exception as e:
print(
f">> Error running RealESRGAN or GFPGAN. Your image was not upscaled.\n{e}"
log.info(
f"Error running RealESRGAN or GFPGAN. Your image was not upscaled.\n{e}"
)
if image_callback is not None:

View File

@@ -1088,7 +1088,7 @@ class Generate:
image = img
log.info(f"using provided input image of size {image.width}x{image.height}")
elif isinstance(img, str):
assert os.path.exists(img), f">> {img}: File not found"
assert os.path.exists(img), f"{img}: File not found"
image = Image.open(img)
log.info(

View File

@@ -1,5 +1,6 @@
# Copyright (c) 2023 Lincoln D. Stein and The InvokeAI Development Team
"""invokeai.util.logging
Copyright 2023 The InvokeAI Development Team
Logging class for InvokeAI that produces console messages that follow
the conventions established in InvokeAI 1.X through 2.X.
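
As the call sites throughout this commit show, the module is imported under a short alias and called like the stdlib logging functions; the conventional ">> " / "** " prefixes are presumably applied by the module itself rather than by each caller. A sketch of the three levels the commit standardizes on (examples taken from the diff; e is assumed to be a caught exception):

    import invokeai.backend.util.logging as log

    log.info("Face restoration disabled")                        # routine status
    log.warning('Invalid command, use "help" to list commands')  # recoverable problem
    log.error(f"{e}. Aborting.")                                 # failure path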

View File

@@ -22,6 +22,7 @@ import torch
from npyscreen import widget
from omegaconf import OmegaConf
import invokeai.backend.util.logging as log
from invokeai.backend.globals import Globals, global_config_dir
from ...backend.config.model_install_backend import (
@@ -455,8 +456,8 @@ def main():
Globals.root = os.path.expanduser(get_root(opt.root) or "")
if not global_config_dir().exists():
print(
">> Your InvokeAI root directory is not set up. Calling invokeai-configure."
log.info(
"Your InvokeAI root directory is not set up. Calling invokeai-configure."
)
from invokeai.frontend.install import invokeai_configure
@@ -466,18 +467,18 @@ def main():
try:
select_and_download_models(opt)
except AssertionError as e:
print(str(e))
log.error(e)
sys.exit(-1)
except KeyboardInterrupt:
print("\nGoodbye! Come back soon.")
log.info("Goodbye! Come back soon.")
except widget.NotEnoughSpaceForWidget as e:
if str(e).startswith("Height of 1 allocated"):
print(
"** Insufficient vertical space for the interface. Please make your window taller and try again"
log.error(
"Insufficient vertical space for the interface. Please make your window taller and try again"
)
elif str(e).startswith("addwstr"):
print(
"** Insufficient horizontal space for the interface. Please make your window wider and try again."
log.error(
"Insufficient horizontal space for the interface. Please make your window wider and try again."
)

View File

@@ -27,6 +27,8 @@ from ...backend.globals import (
global_models_dir,
global_set_root,
)
import invokeai.backend.util.logging as log
from ...backend.model_management import ModelManager
from ...frontend.install.widgets import FloatTitleSlider
@@ -113,7 +115,7 @@ def merge_diffusion_models_and_commit(
model_name=merged_model_name, description=f'Merge of models {", ".join(models)}'
)
if vae := model_manager.config[models[0]].get("vae", None):
print(f">> Using configured VAE assigned to {models[0]}")
log.info(f"Using configured VAE assigned to {models[0]}")
import_args.update(vae=vae)
model_manager.import_diffuser_model(dump_path, **import_args)
model_manager.commit(config_file)
@@ -391,10 +393,8 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
for name in self.model_manager.model_names()
if self.model_manager.model_info(name).get("format") == "diffusers"
]
print(model_names)
return sorted(model_names)
class Mergeapp(npyscreen.NPSAppManaged):
def __init__(self):
super().__init__()
@@ -414,7 +414,7 @@ def run_gui(args: Namespace):
args = mergeapp.merge_arguments
merge_diffusion_models_and_commit(**args)
print(f'>> Models merged into new model: "{args["merged_model_name"]}".')
log.info(f'Models merged into new model: "{args["merged_model_name"]}".')
def run_cli(args: Namespace):
@@ -425,8 +425,8 @@ def run_cli(args: Namespace):
if not args.merged_model_name:
args.merged_model_name = "+".join(args.models)
print(
f'>> No --merged_model_name provided. Defaulting to "{args.merged_model_name}"'
log.info(
f'No --merged_model_name provided. Defaulting to "{args.merged_model_name}"'
)
model_manager = ModelManager(OmegaConf.load(global_config_file()))
@@ -435,7 +435,7 @@ def run_cli(args: Namespace):
), f'A model named "{args.merged_model_name}" already exists. Use --clobber to overwrite.'
merge_diffusion_models_and_commit(**vars(args))
print(f'>> Models merged into new model: "{args.merged_model_name}".')
log.info(f'Models merged into new model: "{args.merged_model_name}".')
def main():
@@ -455,17 +455,16 @@ def main():
run_cli(args)
except widget.NotEnoughSpaceForWidget as e:
if str(e).startswith("Height of 1 allocated"):
print(
"** You need to have at least two diffusers models defined in models.yaml in order to merge"
log.error(
"You need to have at least two diffusers models defined in models.yaml in order to merge"
)
else:
print(
"** Not enough room for the user interface. Try making this window larger."
log.error(
"Not enough room for the user interface. Try making this window larger."
)
sys.exit(-1)
except Exception:
print(">> An error occurred:")
traceback.print_exc()
except Exception as e:
log.error(e)
sys.exit(-1)
except KeyboardInterrupt:
sys.exit(-1)

View File

@@ -20,6 +20,7 @@ import npyscreen
from npyscreen import widget
from omegaconf import OmegaConf
import invokeai.backend.util.logging as log
from invokeai.backend.globals import Globals, global_set_root
from ...backend.training import do_textual_inversion_training, parse_args
@@ -368,14 +369,14 @@ def copy_to_embeddings_folder(args: dict):
dest_dir_name = args["placeholder_token"].strip("<>")
destination = Path(Globals.root, "embeddings", dest_dir_name)
os.makedirs(destination, exist_ok=True)
print(f">> Training completed. Copying learned_embeds.bin into {str(destination)}")
log.info(f"Training completed. Copying learned_embeds.bin into {str(destination)}")
shutil.copy(source, destination)
if (
input("Delete training logs and intermediate checkpoints? [y] ") or "y"
).startswith(("y", "Y")):
shutil.rmtree(Path(args["output_dir"]))
else:
print(f'>> Keeping {args["output_dir"]}')
log.info(f'Keeping {args["output_dir"]}')
def save_args(args: dict):
@@ -422,10 +423,10 @@ def do_front_end(args: Namespace):
do_textual_inversion_training(**args)
copy_to_embeddings_folder(args)
except Exception as e:
print("** An exception occurred during training. The exception was:")
print(str(e))
print("** DETAILS:")
print(traceback.format_exc())
log.error("An exception occurred during training. The exception was:")
log.error(str(e))
log.error("DETAILS:")
log.error(traceback.format_exc())
def main():
@@ -437,21 +438,21 @@ def main():
else:
do_textual_inversion_training(**vars(args))
except AssertionError as e:
print(str(e))
log.error(e)
sys.exit(-1)
except KeyboardInterrupt:
pass
except (widget.NotEnoughSpaceForWidget, Exception) as e:
if str(e).startswith("Height of 1 allocated"):
print(
"** You need to have at least one diffusers models defined in models.yaml in order to train"
log.error(
"You need to have at least one diffusers models defined in models.yaml in order to train"
)
elif str(e).startswith("addwstr"):
print(
"** Not enough window space for the interface. Please make your window larger and try again."
log.error(
"Not enough window space for the interface. Please make your window larger and try again."
)
else:
print(f"** An error has occurred: {str(e)}")
log.error(e)
sys.exit(-1)