rename log to logger throughout

Lincoln Stein 2023-04-29 09:43:40 -04:00
parent f0e07bff5a
commit 8db20e0d95
36 changed files with 387 additions and 387 deletions
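
The change is mechanical: in every touched module, the InvokeAI logging facade that was previously imported under the name "log" is rebound to "logger", and each call site is updated to match. A minimal before/after sketch of the pattern (the message text is illustrative, not taken from any particular file):

# Before this commit: the logging facade is bound to the short name "log"
import invokeai.backend.util.logging as log
log.info("Loading model weights...")

# After this commit: the same module is bound to "logger" instead
import invokeai.backend.util.logging as logger
logger.info("Loading model weights...")

Because only the bound name changes, the module-level info, warning, error, debug and critical helpers behave exactly as before; the per-file diffs below are a one-for-one substitution.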

View File

@ -2,7 +2,7 @@
import os
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from ..services.default_graphs import create_system_graphs
from ..services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
@ -49,7 +49,7 @@ class ApiDependencies:
Globals.disable_xformers = not config.xformers
Globals.ckpt_convert = config.ckpt_convert
log.info(f"Internet connectivity is {Globals.internet_available}")
logger.info(f"Internet connectivity is {Globals.internet_available}")
events = FastAPIEventService(event_handler_id)

View File

@ -4,7 +4,7 @@ import shutil
import asyncio
from typing import Annotated, Any, List, Literal, Optional, Union
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from fastapi.routing import APIRouter, HTTPException
from pydantic import BaseModel, Field, parse_obj_as
from pathlib import Path
@ -116,16 +116,16 @@ async def delete_model(model_name: str) -> None:
model_exists = model_name in model_names
# check if model exists
log.info(f"Checking for model {model_name}...")
logger.info(f"Checking for model {model_name}...")
if model_exists:
log.info(f"Deleting Model: {model_name}")
logger.info(f"Deleting Model: {model_name}")
ApiDependencies.invoker.services.model_manager.del_model(model_name, delete_files=True)
log.info(f"Model Deleted: {model_name}")
logger.info(f"Model Deleted: {model_name}")
raise HTTPException(status_code=204, detail=f"Model '{model_name}' deleted successfully")
else:
log.error(f"Model not found")
logger.error(f"Model not found")
raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found")

View File

@ -7,7 +7,7 @@ from pydantic import BaseModel, Field
import networkx as nx
import matplotlib.pyplot as plt
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from ..invocations.baseinvocation import BaseInvocation
from ..invocations.image import ImageField
from ..services.graph import GraphExecutionState, LibraryGraph, Edge
@ -230,7 +230,7 @@ class HistoryCommand(BaseCommand):
for i in range(min(self.count, len(history))):
entry_id = history[-1 - i]
entry = context.get_session().graph.get_node(entry_id)
log.info(f"{entry_id}: {get_invocation_command(entry)}")
logger.info(f"{entry_id}: {get_invocation_command(entry)}")
class SetDefaultCommand(BaseCommand):

View File

@ -10,7 +10,7 @@ import shlex
from pathlib import Path
from typing import List, Dict, Literal, get_args, get_type_hints, get_origin
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from ...backend import ModelManager, Globals
from ..invocations.baseinvocation import BaseInvocation
from .commands import BaseCommand
@ -161,7 +161,7 @@ def set_autocompleter(model_manager: ModelManager) -> Completer:
pass
except OSError: # file likely corrupted
newname = f"{histfile}.old"
log.error(
logger.error(
f"Your history file {histfile} couldn't be loaded and may be corrupted. Renaming it to {newname}"
)
histfile.replace(Path(newname))

View File

@ -14,7 +14,7 @@ from pydantic import BaseModel
from pydantic.fields import Field
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from invokeai.app.services.metadata import PngMetadataService
from .services.default_graphs import create_system_graphs
from .services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
@ -181,7 +181,7 @@ def invoke_all(context: CliContext):
# Print any errors
if context.session.has_error():
for n in context.session.errors:
log.error(
logger.error(
f"Error in node {n} (source node {context.session.prepared_source_mapping[n]}): {context.session.errors[n]}"
)
@ -364,12 +364,12 @@ def invoke_cli():
invoke_all(context)
except InvalidArgs:
log.warning('Invalid command, use "help" to list commands')
logger.warning('Invalid command, use "help" to list commands')
continue
except SessionError:
# Start a new session
log.warning("Session error: creating a new session")
logger.warning("Session error: creating a new session")
context.reset()
except ExitCli:

View File

@ -1,4 +1,4 @@
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from invokeai.backend.model_management.model_manager import ModelManager
@ -8,6 +8,6 @@ def choose_model(model_manager: ModelManager, model_name: str):
model = model_manager.get_model(model_name)
else:
model = model_manager.get_model()
log.warning(f"{model_name}' is not a valid model name. Using default model \'{model['model_name']}\' instead.")
logger.warning(f"{model_name}' is not a valid model name. Using default model \'{model['model_name']}\' instead.")
return model

View File

@ -7,7 +7,7 @@ from omegaconf import OmegaConf
from pathlib import Path
import invokeai.version
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from ...backend import ModelManager
from ...backend.util import choose_precision, choose_torch_device
from ...backend import Globals
@ -21,8 +21,8 @@ def get_model_manager(config: Args) -> ModelManager:
config, FileNotFoundError(f"The file {config_file} could not be found.")
)
log.info(f"{invokeai.version.__app_name__}, version {invokeai.version.__version__}")
log.info(f'InvokeAI runtime directory is "{Globals.root}"')
logger.info(f"{invokeai.version.__app_name__}, version {invokeai.version.__version__}")
logger.info(f'InvokeAI runtime directory is "{Globals.root}"')
# these two lines prevent a horrible warning message from appearing
# when the frozen CLIP tokenizer is imported
@ -67,7 +67,7 @@ def get_model_manager(config: Args) -> ModelManager:
except (FileNotFoundError, TypeError, AssertionError) as e:
report_model_error(config, e)
except (IOError, KeyError) as e:
log.error(f"{e}. Aborting.")
logger.error(f"{e}. Aborting.")
sys.exit(-1)
# try to autoconvert new models
@ -81,13 +81,13 @@ def get_model_manager(config: Args) -> ModelManager:
return model_manager
def report_model_error(opt: Namespace, e: Exception):
log.error(f'An error occurred while attempting to initialize the model: "{str(e)}"')
log.error(
logger.error(f'An error occurred while attempting to initialize the model: "{str(e)}"')
logger.error(
"This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models."
)
yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE")
if yes_to_all:
log.warning(
logger.warning(
"Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE"
)
else:
@ -97,7 +97,7 @@ def report_model_error(opt: Namespace, e: Exception):
if response.startswith(("n", "N")):
return
log.info("invokeai-configure is launching....\n")
logger.info("invokeai-configure is launching....\n")
# Match arguments that were set on the CLI
# only the arguments accepted by the configuration script are parsed

View File

@ -1,7 +1,7 @@
import sys
import traceback
import torch
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from ...backend.restoration import Restoration
from ...backend.util import choose_torch_device, CPU_DEVICE, MPS_DEVICE
@ -21,16 +21,16 @@ class RestorationServices:
args.gfpgan_model_path
)
else:
log.info("Face restoration disabled")
logger.info("Face restoration disabled")
if args.esrgan:
esrgan = restoration.load_esrgan(args.esrgan_bg_tile)
else:
log.info("Upscaling disabled")
logger.info("Upscaling disabled")
else:
log.info("Face restoration and upscaling disabled")
logger.info("Face restoration and upscaling disabled")
except (ModuleNotFoundError, ImportError):
print(traceback.format_exc(), file=sys.stderr)
log.info("You may need to install the ESRGAN and/or GFPGAN modules")
logger.info("You may need to install the ESRGAN and/or GFPGAN modules")
self.device = torch.device(choose_torch_device())
self.gfpgan = gfpgan
self.codeformer = codeformer
@ -59,14 +59,14 @@ class RestorationServices:
if self.gfpgan is not None or self.codeformer is not None:
if facetool == "gfpgan":
if self.gfpgan is None:
log.info(
logger.info(
"GFPGAN not found. Face restoration is disabled."
)
else:
image = self.gfpgan.process(image, strength, seed)
if facetool == "codeformer":
if self.codeformer is None:
log.info(
logger.info(
"CodeFormer not found. Face restoration is disabled."
)
else:
@ -81,7 +81,7 @@ class RestorationServices:
fidelity=codeformer_fidelity,
)
else:
log.info("Face Restoration is disabled.")
logger.info("Face Restoration is disabled.")
if upscale is not None:
if self.esrgan is not None:
if len(upscale) < 2:
@ -94,9 +94,9 @@ class RestorationServices:
denoise_str=upscale_denoise_str,
)
else:
log.info("ESRGAN is disabled. Image not upscaled.")
logger.info("ESRGAN is disabled. Image not upscaled.")
except Exception as e:
log.info(
logger.info(
f"Error running RealESRGAN or GFPGAN. Your image was not upscaled.\n{e}"
)

View File

@ -96,7 +96,7 @@ from pathlib import Path
from typing import List
import invokeai.version
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from invokeai.backend.image_util import retrieve_metadata
from .globals import Globals
@ -190,7 +190,7 @@ class Args(object):
print(f"{APP_NAME} {APP_VERSION}")
sys.exit(0)
log.info("Initializing, be patient...")
logger.info("Initializing, be patient...")
Globals.root = Path(os.path.abspath(switches.root_dir or Globals.root))
Globals.try_patchmatch = switches.patchmatch
@ -198,12 +198,12 @@ class Args(object):
initfile = os.path.expanduser(os.path.join(Globals.root, Globals.initfile))
legacyinit = os.path.expanduser("~/.invokeai")
if os.path.exists(initfile):
log.info(
logger.info(
f"Initialization file {initfile} found. Loading...",
)
sysargs.insert(0, f"@{initfile}")
elif os.path.exists(legacyinit):
log.warning(
logger.warning(
f"Old initialization file found at {legacyinit}. This location is deprecated. Please move it to {Globals.root}/invokeai.init."
)
sysargs.insert(0, f"@{legacyinit}")
@ -214,7 +214,7 @@ class Args(object):
self._arg_switches = self._arg_parser.parse_args(sysargs)
return self._arg_switches
except Exception as e:
log.error(f"An exception has occurred: {e}")
logger.error(f"An exception has occurred: {e}")
return None
def parse_cmd(self, cmd_string):
@ -1154,7 +1154,7 @@ class Args(object):
def format_metadata(**kwargs):
log.warning("format_metadata() is deprecated. Please use metadata_dumps()")
logger.warning("format_metadata() is deprecated. Please use metadata_dumps()")
return metadata_dumps(kwargs)
@ -1326,7 +1326,7 @@ def metadata_loads(metadata) -> list:
import sys
import traceback
log.error("Could not read metadata")
logger.error("Could not read metadata")
print(traceback.format_exc(), file=sys.stderr)
return results

View File

@ -27,7 +27,7 @@ from diffusers.utils.import_utils import is_xformers_available
from omegaconf import OmegaConf
from pathlib import Path
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from .args import metadata_from_png
from .generator import infill_methods
from .globals import Globals, global_cache_dir
@ -196,12 +196,12 @@ class Generate:
# device to Generate(). However the device was then ignored, so
# it wasn't actually doing anything. This logic could be reinstated.
self.device = torch.device(choose_torch_device())
log.info(f"Using device_type {self.device.type}")
logger.info(f"Using device_type {self.device.type}")
if full_precision:
if self.precision != "auto":
raise ValueError("Remove --full_precision / -F if using --precision")
log.warning("Please remove deprecated --full_precision / -F")
log.warning("If auto config does not work you can use --precision=float32")
logger.warning("Please remove deprecated --full_precision / -F")
logger.warning("If auto config does not work you can use --precision=float32")
self.precision = "float32"
if self.precision == "auto":
self.precision = choose_precision(self.device)
@ -209,13 +209,13 @@ class Generate:
if is_xformers_available():
if torch.cuda.is_available() and not Globals.disable_xformers:
log.info("xformers memory-efficient attention is available and enabled")
logger.info("xformers memory-efficient attention is available and enabled")
else:
log.info(
logger.info(
"xformers memory-efficient attention is available but disabled"
)
else:
log.info("xformers not installed")
logger.info("xformers not installed")
# model caching system for fast switching
self.model_manager = ModelManager(
@ -230,7 +230,7 @@ class Generate:
fallback = self.model_manager.default_model() or FALLBACK_MODEL_NAME
model = model or fallback
if not self.model_manager.valid_model(model):
log.warning(
logger.warning(
f'"{model}" is not a known model name; falling back to {fallback}.'
)
model = None
@ -247,10 +247,10 @@ class Generate:
# load safety checker if requested
if safety_checker:
log.info("Initializing NSFW checker")
logger.info("Initializing NSFW checker")
self.safety_checker = SafetyChecker(self.device)
else:
log.info("NSFW checker is disabled")
logger.info("NSFW checker is disabled")
def prompt2png(self, prompt, outdir, **kwargs):
"""
@ -568,7 +568,7 @@ class Generate:
self.clear_cuda_cache()
if catch_interrupts:
log.warning("Interrupted** Partial results will be returned.")
logger.warning("Interrupted** Partial results will be returned.")
else:
raise KeyboardInterrupt
except RuntimeError:
@ -576,11 +576,11 @@ class Generate:
self.clear_cuda_cache()
print(traceback.format_exc(), file=sys.stderr)
log.info("Could not generate image.")
logger.info("Could not generate image.")
toc = time.time()
log.info("Usage stats:")
log.info(f"{len(results)} image(s) generated in "+"%4.2fs" % (toc - tic))
logger.info("Usage stats:")
logger.info(f"{len(results)} image(s) generated in "+"%4.2fs" % (toc - tic))
self.print_cuda_stats()
return results
@ -610,14 +610,14 @@ class Generate:
def print_cuda_stats(self):
if self._has_cuda():
self.gather_cuda_stats()
log.info(
logger.info(
"Max VRAM used for this generation: "+
"%4.2fG. " % (self.max_memory_allocated / 1e9)+
"Current VRAM utilization: "+
"%4.2fG" % (self.memory_allocated / 1e9)
)
log.info(
logger.info(
"Max VRAM used since script start: " +
"%4.2fG" % (self.session_peakmem / 1e9)
)
@ -648,7 +648,7 @@ class Generate:
seed = random.randrange(0, np.iinfo(np.uint32).max)
prompt = opt.prompt or args.prompt or ""
log.info(f'using seed {seed} and prompt "{prompt}" for {image_path}')
logger.info(f'using seed {seed} and prompt "{prompt}" for {image_path}')
# try to reuse the same filename prefix as the original file.
# we take everything up to the first period
@ -697,7 +697,7 @@ class Generate:
try:
extend_instructions[direction] = int(pixels)
except ValueError:
log.warning(
logger.warning(
'invalid extension instruction. Use <directions> <pixels>..., as in "top 64 left 128 right 64 bottom 64"'
)
@ -721,7 +721,7 @@ class Generate:
# fetch the metadata from the image
generator = self.select_generator(embiggen=True)
opt.strength = opt.embiggen_strength or 0.40
log.info(
logger.info(
f"Setting img2img strength to {opt.strength} for happy embiggening"
)
generator.generate(
@ -749,12 +749,12 @@ class Generate:
return restorer.process(opt, args, image_callback=callback, prefix=prefix)
elif tool is None:
log.warning(
logger.warning(
"please provide at least one postprocessing option, such as -G or -U"
)
return None
else:
log.warning(f"postprocessing tool {tool} is not yet supported")
logger.warning(f"postprocessing tool {tool} is not yet supported")
return None
def select_generator(
@ -798,7 +798,7 @@ class Generate:
image = self._load_img(img)
if image.width < self.width and image.height < self.height:
log.warning(
logger.warning(
f"img2img and inpainting may produce unexpected results with initial images smaller than {self.width}x{self.height} in both dimensions"
)
@ -810,7 +810,7 @@ class Generate:
if (image.width * image.height) > (
self.width * self.height
) and self.size_matters:
log.info(
logger.info(
"This input is larger than your defaults. If you run out of memory, please use a smaller image."
)
self.size_matters = False
@ -892,11 +892,11 @@ class Generate:
try:
model_data = cache.get_model(model_name)
except Exception as e:
log.warning(f"model {model_name} could not be loaded: {str(e)}")
logger.warning(f"model {model_name} could not be loaded: {str(e)}")
print(traceback.format_exc(), file=sys.stderr)
if previous_model_name is None:
raise e
log.warning("trying to reload previous model")
logger.warning("trying to reload previous model")
model_data = cache.get_model(previous_model_name) # load previous
if model_data is None:
raise e
@ -963,14 +963,14 @@ class Generate:
if self.gfpgan is not None or self.codeformer is not None:
if facetool == "gfpgan":
if self.gfpgan is None:
log.info(
logger.info(
"GFPGAN not found. Face restoration is disabled."
)
else:
image = self.gfpgan.process(image, strength, seed)
if facetool == "codeformer":
if self.codeformer is None:
log.info(
logger.info(
"CodeFormer not found. Face restoration is disabled."
)
else:
@ -985,7 +985,7 @@ class Generate:
fidelity=codeformer_fidelity,
)
else:
log.info("Face Restoration is disabled.")
logger.info("Face Restoration is disabled.")
if upscale is not None:
if self.esrgan is not None:
if len(upscale) < 2:
@ -998,9 +998,9 @@ class Generate:
denoise_str=upscale_denoise_str,
)
else:
log.info("ESRGAN is disabled. Image not upscaled.")
logger.info("ESRGAN is disabled. Image not upscaled.")
except Exception as e:
log.info(
logger.info(
f"Error running RealESRGAN or GFPGAN. Your image was not upscaled.\n{e}"
)
@ -1077,7 +1077,7 @@ class Generate:
)
self.sampler = default
log.info(msg)
logger.info(msg)
if not hasattr(self.sampler, "uses_inpainting_model"):
# FIXME: terrible kludge!
@ -1086,17 +1086,17 @@ class Generate:
def _load_img(self, img) -> Image:
if isinstance(img, Image.Image):
image = img
log.info(f"using provided input image of size {image.width}x{image.height}")
logger.info(f"using provided input image of size {image.width}x{image.height}")
elif isinstance(img, str):
assert os.path.exists(img), f"{img}: File not found"
image = Image.open(img)
log.info(
logger.info(
f"loaded input image of size {image.width}x{image.height} from {img}"
)
else:
image = Image.open(img)
log.info(f"loaded input image of size {image.width}x{image.height}")
logger.info(f"loaded input image of size {image.width}x{image.height}")
image = ImageOps.exif_transpose(image)
return image
@ -1184,11 +1184,11 @@ class Generate:
def _transparency_check_and_warning(self, image, mask, force_outpaint=False):
if not mask:
log.info(
logger.info(
"Initial image has transparent areas. Will inpaint in these regions."
)
if (not force_outpaint) and self._check_for_erasure(image):
log.info(
logger.info(
"Colors underneath the transparent region seem to have been erased.\n" +
"Inpainting will be suboptimal. Please preserve the colors when making\n" +
"a transparency mask, or provide mask explicitly using --init_mask (-M)."
@ -1202,10 +1202,10 @@ class Generate:
def _fit_image(self, image, max_dimensions):
w, h = max_dimensions
log.info(f"image will be resized to fit inside a box {w}x{h} in size.")
logger.info(f"image will be resized to fit inside a box {w}x{h} in size.")
# note that InitImageResizer does the multiple of 64 truncation internally
image = InitImageResizer(image).resize(width=w, height=h)
log.info(
logger.info(
f"after adjusting image dimensions to be multiples of 64, init image is {image.width}x{image.height}"
)
return image
@ -1217,7 +1217,7 @@ class Generate:
) # resize to integer multiple of 64
if h != height or w != width:
if log:
log.info(
logger.info(
f"Provided width and height must be multiples of 64. Auto-resizing to {w}x{h}"
)
height = h

View File

@ -25,7 +25,7 @@ from typing import Callable, List, Iterator, Optional, Type
from dataclasses import dataclass, field
from diffusers.schedulers import SchedulerMixin as Scheduler
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from ..image_util import configure_model_padding
from ..util.util import rand_perlin_2d
from ..safety_checker import SafetyChecker
@ -373,7 +373,7 @@ class Generator:
try:
x_T = self.get_noise(width, height)
except:
log.error("An error occurred while getting initial noise")
logger.error("An error occurred while getting initial noise")
print(traceback.format_exc())
# Pass on the seed in case a layer beneath us needs to generate noise on its own.
@ -608,7 +608,7 @@ class Generator:
image = self.sample_to_image(sample)
dirname = os.path.dirname(filepath) or "."
if not os.path.exists(dirname):
log.info(f"creating directory {dirname}")
logger.info(f"creating directory {dirname}")
os.makedirs(dirname, exist_ok=True)
image.save(filepath, "PNG")

View File

@ -8,7 +8,7 @@ import torch
from PIL import Image
from tqdm import trange
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from .base import Generator
from .img2img import Img2Img
@ -73,21 +73,21 @@ class Embiggen(Generator):
embiggen = [1.0] # If not specified, assume no scaling
elif embiggen[0] < 0:
embiggen[0] = 1.0
log.warning(
logger.warning(
"Embiggen scaling factor cannot be negative, fell back to the default of 1.0 !"
)
if len(embiggen) < 2:
embiggen.append(0.75)
elif embiggen[1] > 1.0 or embiggen[1] < 0:
embiggen[1] = 0.75
log.warning(
logger.warning(
"Embiggen upscaling strength for ESRGAN must be between 0 and 1, fell back to the default of 0.75 !"
)
if len(embiggen) < 3:
embiggen.append(0.25)
elif embiggen[2] < 0:
embiggen[2] = 0.25
log.warning(
logger.warning(
"Overlap size for Embiggen must be a positive ratio between 0 and 1 OR a number of pixels, fell back to the default of 0.25 !"
)
@ -98,7 +98,7 @@ class Embiggen(Generator):
embiggen_tiles.sort()
if strength >= 0.5:
log.warning(
logger.warning(
f"Embiggen may produce mirror motifs if the strength (-f) is too high (currently {strength}). Try values between 0.35-0.45."
)
@ -122,7 +122,7 @@ class Embiggen(Generator):
from ..restoration.realesrgan import ESRGAN
esrgan = ESRGAN()
log.info(
logger.info(
f"ESRGAN upscaling init image prior to cutting with Embiggen with strength {embiggen[1]}"
)
if embiggen[0] > 2:
@ -313,9 +313,9 @@ class Embiggen(Generator):
def make_image():
# Make main tiles -------------------------------------------------
if embiggen_tiles:
log.info(f"Making {len(embiggen_tiles)} Embiggen tiles...")
logger.info(f"Making {len(embiggen_tiles)} Embiggen tiles...")
else:
log.info(
logger.info(
f"Making {(emb_tiles_x * emb_tiles_y)} Embiggen tiles ({emb_tiles_x}x{emb_tiles_y})..."
)
@ -362,11 +362,11 @@ class Embiggen(Generator):
# newinitimage.save(newinitimagepath)
if embiggen_tiles:
log.debug(
logger.debug(
f"Making tile #{tile + 1} ({embiggen_tiles.index(tile) + 1} of {len(embiggen_tiles)} requested)"
)
else:
log.debug(f"Starting {tile + 1} of {(emb_tiles_x * emb_tiles_y)} tiles")
logger.debug(f"Starting {tile + 1} of {(emb_tiles_x * emb_tiles_y)} tiles")
# create a torch tensor from an Image
newinitimage = np.array(newinitimage).astype(np.float32) / 255.0
@ -548,7 +548,7 @@ class Embiggen(Generator):
# Layer tile onto final image
outputsuperimage.alpha_composite(intileimage, (left, top))
else:
log.error(
logger.error(
"Could not find all Embiggen output tiles in memory? Something must have gone wrong with img2img generation."
)

View File

@ -14,7 +14,7 @@ from ..stable_diffusion.diffusers_pipeline import StableDiffusionGeneratorPipeli
from ..stable_diffusion.diffusers_pipeline import ConditioningData
from ..stable_diffusion.diffusers_pipeline import trim_to_multiple_of
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
class Txt2Img2Img(Generator):
def __init__(self, model, precision):
@ -79,7 +79,7 @@ class Txt2Img2Img(Generator):
# the message below is accurate.
init_width = first_pass_latent_output.size()[3] * self.downsampling_factor
init_height = first_pass_latent_output.size()[2] * self.downsampling_factor
log.info(
logger.info(
f"Interpolating from {init_width}x{init_height} to {width}x{height} using DDIM sampling"
)

View File

@ -5,7 +5,7 @@ wraps the actual patchmatch object. It respects the global
be suppressed or deferred
"""
import numpy as np
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import Globals
class PatchMatch:
@ -27,12 +27,12 @@ class PatchMatch:
from patchmatch import patch_match as pm
if pm.patchmatch_available:
log.info("Patchmatch initialized")
logger.info("Patchmatch initialized")
else:
log.info("Patchmatch not loaded (nonfatal)")
logger.info("Patchmatch not loaded (nonfatal)")
self.patch_match = pm
else:
log.info("Patchmatch loading disabled")
logger.info("Patchmatch loading disabled")
self.tried_load = True
@classmethod

View File

@ -32,7 +32,7 @@ import torch
from PIL import Image, ImageOps
from transformers import AutoProcessor, CLIPSegForImageSegmentation
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import global_cache_dir
CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
@ -83,7 +83,7 @@ class Txt2Mask(object):
"""
def __init__(self, device="cpu", refined=False):
log.info("Initializing clipseg model for text to mask inference")
logger.info("Initializing clipseg model for text to mask inference")
# BUG: we are not doing anything with the device option at this time
self.device = device

View File

@ -25,7 +25,7 @@ from typing import Union
import torch
from safetensors.torch import load_file
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import global_cache_dir, global_config_dir
from .model_manager import ModelManager, SDLegacyType
@ -373,9 +373,9 @@ def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False
unet_key = "model.diffusion_model."
# at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA
if sum(k.startswith("model_ema") for k in keys) > 100:
log.debug(f"Checkpoint {path} has both EMA and non-EMA weights.")
logger.debug(f"Checkpoint {path} has both EMA and non-EMA weights.")
if extract_ema:
log.debug("Extracting EMA weights (usually better for inference)")
logger.debug("Extracting EMA weights (usually better for inference)")
for key in keys:
if key.startswith("model.diffusion_model"):
flat_ema_key = "model_ema." + "".join(key.split(".")[1:])
@ -393,7 +393,7 @@ def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False
key
)
else:
log.debug(
logger.debug(
"Extracting only the non-EMA weights (usually better for fine-tuning)"
)
@ -1116,7 +1116,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
if "global_step" in checkpoint:
global_step = checkpoint["global_step"]
else:
log.debug("global_step key not found in model")
logger.debug("global_step key not found in model")
global_step = None
# sometimes there is a state_dict key and sometimes not
@ -1230,15 +1230,15 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
# If a replacement VAE path was specified, we'll incorporate that into
# the checkpoint model and then convert it
if vae_path:
log.debug(f"Converting VAE {vae_path}")
logger.debug(f"Converting VAE {vae_path}")
replace_checkpoint_vae(checkpoint,vae_path)
# otherwise we use the original VAE, provided that
# an externally loaded diffusers VAE was not passed
elif not vae:
log.debug("Using checkpoint model's original VAE")
logger.debug("Using checkpoint model's original VAE")
if vae:
log.debug("Using replacement diffusers VAE")
logger.debug("Using replacement diffusers VAE")
else: # convert the original or replacement VAE
vae_config = create_vae_diffusers_config(
original_config, image_size=image_size

View File

@ -24,7 +24,7 @@ import safetensors
import safetensors.torch
import torch
import transformers
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from diffusers import (
AutoencoderKL,
UNet2DConditionModel,
@ -133,7 +133,7 @@ class ModelManager(object):
)
if not self.valid_model(model_name):
log.error(
logger.error(
f'"{model_name}" is not a known model name. Please check your models.yaml file'
)
return self.current_model
@ -145,7 +145,7 @@ class ModelManager(object):
if model_name in self.models:
requested_model = self.models[model_name]["model"]
log.info(f"Retrieving model {model_name} from system RAM cache")
logger.info(f"Retrieving model {model_name} from system RAM cache")
requested_model.ready()
width = self.models[model_name]["width"]
height = self.models[model_name]["height"]
@ -380,7 +380,7 @@ class ModelManager(object):
"""
omega = self.config
if model_name not in omega:
log.error(f"Unknown model {model_name}")
logger.error(f"Unknown model {model_name}")
return
# save these for use in deletion later
conf = omega[model_name]
@ -393,13 +393,13 @@ class ModelManager(object):
self.stack.remove(model_name)
if delete_files:
if weights:
log.info(f"Deleting file {weights}")
logger.info(f"Deleting file {weights}")
Path(weights).unlink(missing_ok=True)
elif path:
log.info(f"Deleting directory {path}")
logger.info(f"Deleting directory {path}")
rmtree(path, ignore_errors=True)
elif repo_id:
log.info(f"Deleting the cached model directory for {repo_id}")
logger.info(f"Deleting the cached model directory for {repo_id}")
self._delete_model_from_cache(repo_id)
def add_model(
@ -440,7 +440,7 @@ class ModelManager(object):
def _load_model(self, model_name: str):
"""Load and initialize the model from configuration variables passed at object creation time"""
if model_name not in self.config:
log.error(
logger.error(
f'"{model_name}" is not a known model name. Please check your models.yaml file'
)
return
@ -458,7 +458,7 @@ class ModelManager(object):
model_format = mconfig.get("format", "ckpt")
if model_format == "ckpt":
weights = mconfig.weights
log.info(f"Loading {model_name} from {weights}")
logger.info(f"Loading {model_name} from {weights}")
model, width, height, model_hash = self._load_ckpt_model(
model_name, mconfig
)
@ -474,13 +474,13 @@ class ModelManager(object):
# usage statistics
toc = time.time()
log.info("Model loaded in " + "%4.2fs" % (toc - tic))
logger.info("Model loaded in " + "%4.2fs" % (toc - tic))
if self._has_cuda():
log.info(
logger.info(
"Max VRAM used to load the model: "+
"%4.2fG" % (torch.cuda.max_memory_allocated() / 1e9)
)
log.info(
logger.info(
"Current VRAM usage: "+
"%4.2fG" % (torch.cuda.memory_allocated() / 1e9)
)
@ -490,11 +490,11 @@ class ModelManager(object):
name_or_path = self.model_name_or_path(mconfig)
using_fp16 = self.precision == "float16"
log.info(f"Loading diffusers model from {name_or_path}")
logger.info(f"Loading diffusers model from {name_or_path}")
if using_fp16:
log.debug("Using faster float16 precision")
logger.debug("Using faster float16 precision")
else:
log.debug("Using more accurate float32 precision")
logger.debug("Using more accurate float32 precision")
# TODO: scan weights maybe?
pipeline_args: dict[str, Any] = dict(
@ -526,7 +526,7 @@ class ModelManager(object):
if str(e).startswith("fp16 is not a valid"):
pass
else:
log.error(
logger.error(
f"An unexpected error occurred while downloading the model: {e})"
)
if pipeline:
@ -545,7 +545,7 @@ class ModelManager(object):
# square images???
width = pipeline.unet.config.sample_size * pipeline.vae_scale_factor
height = width
log.debug(f"Default image dimensions = {width} x {height}")
logger.debug(f"Default image dimensions = {width} x {height}")
return pipeline, width, height, model_hash
@ -562,7 +562,7 @@ class ModelManager(object):
weights = os.path.normpath(os.path.join(Globals.root, weights))
# Convert to diffusers and return a diffusers pipeline
log.info(f"Converting legacy checkpoint {model_name} into a diffusers model...")
logger.info(f"Converting legacy checkpoint {model_name} into a diffusers model...")
from . import load_pipeline_from_original_stable_diffusion_ckpt
@ -627,7 +627,7 @@ class ModelManager(object):
if model_name not in self.models:
return
log.info(f"Offloading {model_name} to CPU")
logger.info(f"Offloading {model_name} to CPU")
model = self.models[model_name]["model"]
model.offload_all()
self.current_model = None
@ -643,26 +643,26 @@ class ModelManager(object):
and option to exit if an infected file is identified.
"""
# scan model
log.debug(f"Scanning Model: {model_name}")
logger.debug(f"Scanning Model: {model_name}")
scan_result = scan_file_path(checkpoint)
if scan_result.infected_files != 0:
if scan_result.infected_files == 1:
log.critical(f"Issues Found In Model: {scan_result.issues_count}")
log.critical("The model you are trying to load seems to be infected.")
log.critical("For your safety, InvokeAI will not load this model.")
log.critical("Please use checkpoints from trusted sources.")
log.critical("Exiting InvokeAI")
logger.critical(f"Issues Found In Model: {scan_result.issues_count}")
logger.critical("The model you are trying to load seems to be infected.")
logger.critical("For your safety, InvokeAI will not load this model.")
logger.critical("Please use checkpoints from trusted sources.")
logger.critical("Exiting InvokeAI")
sys.exit()
else:
log.warning("InvokeAI was unable to scan the model you are using.")
logger.warning("InvokeAI was unable to scan the model you are using.")
model_safe_check_fail = ask_user(
"Do you want to to continue loading the model?", ["y", "n"]
)
if model_safe_check_fail.lower() != "y":
log.critical("Exiting InvokeAI")
logger.critical("Exiting InvokeAI")
sys.exit()
else:
log.debug("Model scanned ok")
logger.debug("Model scanned ok")
def import_diffuser_model(
self,
@ -779,24 +779,24 @@ class ModelManager(object):
model_path: Path = None
thing = path_url_or_repo # to save typing
log.info(f"Probing {thing} for import")
logger.info(f"Probing {thing} for import")
if thing.startswith(("http:", "https:", "ftp:")):
log.info(f"{thing} appears to be a URL")
logger.info(f"{thing} appears to be a URL")
model_path = self._resolve_path(
thing, "models/ldm/stable-diffusion-v1"
) # _resolve_path does a download if needed
elif Path(thing).is_file() and thing.endswith((".ckpt", ".safetensors")):
if Path(thing).stem in ["model", "diffusion_pytorch_model"]:
log.debug(f"{Path(thing).name} appears to be part of a diffusers model. Skipping import")
logger.debug(f"{Path(thing).name} appears to be part of a diffusers model. Skipping import")
return
else:
log.debug(f"{thing} appears to be a checkpoint file on disk")
logger.debug(f"{thing} appears to be a checkpoint file on disk")
model_path = self._resolve_path(thing, "models/ldm/stable-diffusion-v1")
elif Path(thing).is_dir() and Path(thing, "model_index.json").exists():
log.debug(f"{thing} appears to be a diffusers file on disk")
logger.debug(f"{thing} appears to be a diffusers file on disk")
model_name = self.import_diffuser_model(
thing,
vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
@ -807,30 +807,30 @@ class ModelManager(object):
elif Path(thing).is_dir():
if (Path(thing) / "model_index.json").exists():
log.debug(f"{thing} appears to be a diffusers model.")
logger.debug(f"{thing} appears to be a diffusers model.")
model_name = self.import_diffuser_model(
thing, commit_to_conf=commit_to_conf
)
else:
log.debug(f"{thing} appears to be a directory. Will scan for models to import")
logger.debug(f"{thing} appears to be a directory. Will scan for models to import")
for m in list(Path(thing).rglob("*.ckpt")) + list(
Path(thing).rglob("*.safetensors")
):
if model_name := self.heuristic_import(
str(m), commit_to_conf=commit_to_conf
):
log.info(f"{model_name} successfully imported")
logger.info(f"{model_name} successfully imported")
return model_name
elif re.match(r"^[\w.+-]+/[\w.+-]+$", thing):
log.debug(f"{thing} appears to be a HuggingFace diffusers repo_id")
logger.debug(f"{thing} appears to be a HuggingFace diffusers repo_id")
model_name = self.import_diffuser_model(
thing, commit_to_conf=commit_to_conf
)
pipeline, _, _, _ = self._load_diffusers_model(self.config[model_name])
return model_name
else:
log.warning(f"{thing}: Unknown thing. Please provide a URL, file path, directory or HuggingFace repo_id")
logger.warning(f"{thing}: Unknown thing. Please provide a URL, file path, directory or HuggingFace repo_id")
# Model_path is set in the event of a legacy checkpoint file.
# If not set, we're all done
@ -838,7 +838,7 @@ class ModelManager(object):
return
if model_path.stem in self.config: # already imported
log.debug("Already imported. Skipping")
logger.debug("Already imported. Skipping")
return model_path.stem
# another round of heuristics to guess the correct config file.
@ -854,38 +854,38 @@ class ModelManager(object):
# look for a like-named .yaml file in same directory
if model_path.with_suffix(".yaml").exists():
model_config_file = model_path.with_suffix(".yaml")
log.debug(f"Using config file {model_config_file.name}")
logger.debug(f"Using config file {model_config_file.name}")
else:
model_type = self.probe_model_type(checkpoint)
if model_type == SDLegacyType.V1:
log.debug("SD-v1 model detected")
logger.debug("SD-v1 model detected")
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v1-inference.yaml"
)
elif model_type == SDLegacyType.V1_INPAINT:
log.debug("SD-v1 inpainting model detected")
logger.debug("SD-v1 inpainting model detected")
model_config_file = Path(
Globals.root,
"configs/stable-diffusion/v1-inpainting-inference.yaml",
)
elif model_type == SDLegacyType.V2_v:
log.debug("SD-v2-v model detected")
logger.debug("SD-v2-v model detected")
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
)
elif model_type == SDLegacyType.V2_e:
log.debug("SD-v2-e model detected")
logger.debug("SD-v2-e model detected")
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v2-inference.yaml"
)
elif model_type == SDLegacyType.V2:
log.warning(
logger.warning(
f"{thing} is a V2 checkpoint file, but its parameterization cannot be determined. Please provide configuration file path."
)
return
else:
log.warning(
logger.warning(
f"{thing} is a legacy checkpoint file but not a known Stable Diffusion model. Please provide configuration file path."
)
return
@ -902,7 +902,7 @@ class ModelManager(object):
for suffix in ["pt", "ckpt", "safetensors"]:
if (model_path.with_suffix(f".vae.{suffix}")).exists():
vae_path = model_path.with_suffix(f".vae.{suffix}")
log.debug(f"Using VAE file {vae_path.name}")
logger.debug(f"Using VAE file {vae_path.name}")
vae = None if vae_path else dict(repo_id="stabilityai/sd-vae-ft-mse")
diffuser_path = Path(
@ -948,14 +948,14 @@ class ModelManager(object):
from . import convert_ckpt_to_diffusers
if diffusers_path.exists():
log.error(
logger.error(
f"The path {str(diffusers_path)} already exists. Please move or remove it and try again."
)
return
model_name = model_name or diffusers_path.name
model_description = model_description or f"Converted version of {model_name}"
log.debug(f"Converting {model_name} to diffusers (30-60s)")
logger.debug(f"Converting {model_name} to diffusers (30-60s)")
try:
# By passing the specified VAE to the conversion function, the autoencoder
# will be built into the model rather than tacked on afterward via the config file
@ -972,10 +972,10 @@ class ModelManager(object):
vae_path=vae_path,
scan_needed=scan_needed,
)
log.debug(
logger.debug(
f"Success. Converted model is now located at {str(diffusers_path)}"
)
log.debug(f"Writing new config file entry for {model_name}")
logger.debug(f"Writing new config file entry for {model_name}")
new_config = dict(
path=str(diffusers_path),
description=model_description,
@ -986,17 +986,17 @@ class ModelManager(object):
self.add_model(model_name, new_config, True)
if commit_to_conf:
self.commit(commit_to_conf)
log.debug("Conversion succeeded")
logger.debug("Conversion succeeded")
except Exception as e:
log.warning(f"Conversion failed: {str(e)}")
log.warning(
logger.warning(f"Conversion failed: {str(e)}")
logger.warning(
"If you are trying to convert an inpainting or 2.X model, please indicate the correct config file (e.g. v1-inpainting-inference.yaml)"
)
return model_name
def search_models(self, search_folder):
log.info(f"Finding Models In: {search_folder}")
logger.info(f"Finding Models In: {search_folder}")
models_folder_ckpt = Path(search_folder).glob("**/*.ckpt")
models_folder_safetensors = Path(search_folder).glob("**/*.safetensors")
@ -1020,7 +1020,7 @@ class ModelManager(object):
num_loaded_models = len(self.models)
if num_loaded_models >= self.max_loaded_models:
least_recent_model = self._pop_oldest_model()
log.info(
logger.info(
f"Cache limit (max={self.max_loaded_models}) reached. Purging {least_recent_model}"
)
if least_recent_model is not None:
@ -1029,7 +1029,7 @@ class ModelManager(object):
def print_vram_usage(self) -> None:
if self._has_cuda:
log.info(
logger.info(
"Current VRAM usage:"+
"%4.2fG" % (torch.cuda.memory_allocated() / 1e9),
)
@ -1119,10 +1119,10 @@ class ModelManager(object):
dest = hub / model.stem
if dest.exists() and not source.exists():
continue
log.info(f"{source} => {dest}")
logger.info(f"{source} => {dest}")
if source.exists():
if dest.is_symlink():
log.warning(f"Found symlink at {dest.name}. Not migrating.")
logger.warning(f"Found symlink at {dest.name}. Not migrating.")
elif dest.exists():
if source.is_dir():
rmtree(source)
@ -1139,7 +1139,7 @@ class ModelManager(object):
]
for d in empty:
os.rmdir(d)
log.info("Migration is done. Continuing...")
logger.info("Migration is done. Continuing...")
def _resolve_path(
self, source: Union[str, Path], dest_directory: str
@ -1182,14 +1182,14 @@ class ModelManager(object):
def _add_embeddings_to_model(self, model: StableDiffusionGeneratorPipeline):
if self.embedding_path is not None:
log.info(f"Loading embeddings from {self.embedding_path}")
logger.info(f"Loading embeddings from {self.embedding_path}")
for root, _, files in os.walk(self.embedding_path):
for name in files:
ti_path = os.path.join(root, name)
model.textual_inversion_manager.load_textual_inversion(
ti_path, defer_injecting_tokens=True
)
log.info(
logger.info(
f'Textual inversion triggers: {", ".join(sorted(model.textual_inversion_manager.get_all_trigger_strings()))}'
)
@ -1212,7 +1212,7 @@ class ModelManager(object):
with open(hashpath) as f:
hash = f.read()
return hash
log.debug("Calculating sha256 hash of model files")
logger.debug("Calculating sha256 hash of model files")
tic = time.time()
sha = hashlib.sha256()
count = 0
@ -1224,7 +1224,7 @@ class ModelManager(object):
sha.update(chunk)
hash = sha.hexdigest()
toc = time.time()
log.debug(f"sha256 = {hash} ({count} files hashed in", "%4.2fs)" % (toc - tic))
logger.debug(f"sha256 = {hash} ({count} files hashed in", "%4.2fs)" % (toc - tic))
with open(hashpath, "w") as f:
f.write(hash)
return hash
@ -1242,13 +1242,13 @@ class ModelManager(object):
hash = f.read()
return hash
log.debug("Calculating sha256 hash of weights file")
logger.debug("Calculating sha256 hash of weights file")
tic = time.time()
sha = hashlib.sha256()
sha.update(data)
hash = sha.hexdigest()
toc = time.time()
log.debug(f"sha256 = {hash} "+"(%4.2fs)" % (toc - tic))
logger.debug(f"sha256 = {hash} "+"(%4.2fs)" % (toc - tic))
with open(hashpath, "w") as f:
f.write(hash)
@ -1269,12 +1269,12 @@ class ModelManager(object):
local_files_only=not Globals.internet_available,
)
log.debug(f"Loading diffusers VAE from {name_or_path}")
logger.debug(f"Loading diffusers VAE from {name_or_path}")
if using_fp16:
vae_args.update(torch_dtype=torch.float16)
fp_args_list = [{"revision": "fp16"}, {}]
else:
log.debug("Using more accurate float32 precision")
logger.debug("Using more accurate float32 precision")
fp_args_list = [{}]
vae = None
@ -1298,7 +1298,7 @@ class ModelManager(object):
break
if not vae and deferred_error:
log.warning(f"Could not load VAE {name_or_path}: {str(deferred_error)}")
logger.warning(f"Could not load VAE {name_or_path}: {str(deferred_error)}")
return vae
@ -1314,7 +1314,7 @@ class ModelManager(object):
for revision in repo.revisions:
hashes_to_delete.add(revision.commit_hash)
strategy = cache_info.delete_revisions(*hashes_to_delete)
log.warning(
logger.warning(
f"Deletion of this model is expected to free {strategy.expected_freed_size_str}"
)
strategy.execute()

View File

@ -18,7 +18,7 @@ from compel.prompt_parser import (
PromptParser,
)
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import Globals
from ..stable_diffusion import InvokeAIDiffuserComponent
@ -163,8 +163,8 @@ def log_tokenization(
negative_prompt: Union[Blend, FlattenedPrompt],
tokenizer,
):
log.info(f"[TOKENLOG] Parsed Prompt: {positive_prompt}")
log.info(f"[TOKENLOG] Parsed Negative Prompt: {negative_prompt}")
logger.info(f"[TOKENLOG] Parsed Prompt: {positive_prompt}")
logger.info(f"[TOKENLOG] Parsed Negative Prompt: {negative_prompt}")
log_tokenization_for_prompt_object(positive_prompt, tokenizer)
log_tokenization_for_prompt_object(
@ -238,12 +238,12 @@ def log_tokenization_for_text(text, tokenizer, display_label=None, truncate_if_t
usedTokens += 1
if usedTokens > 0:
log.info(f'[TOKENLOG] Tokens {display_label or ""} ({usedTokens}):')
log.debug(f"{tokenized}\x1b[0m")
logger.info(f'[TOKENLOG] Tokens {display_label or ""} ({usedTokens}):')
logger.debug(f"{tokenized}\x1b[0m")
if discarded != "":
log.info(f"[TOKENLOG] Tokens Discarded ({totalTokens - usedTokens}):")
log.debug(f"{discarded}\x1b[0m")
logger.info(f"[TOKENLOG] Tokens Discarded ({totalTokens - usedTokens}):")
logger.debug(f"{discarded}\x1b[0m")
def try_parse_legacy_blend(text: str, skip_normalize: bool = False) -> Optional[Blend]:
@ -296,7 +296,7 @@ def split_weighted_subprompts(text, skip_normalize=False) -> list:
return parsed_prompts
weight_sum = sum(map(lambda x: x[1], parsed_prompts))
if weight_sum == 0:
log.warning(
logger.warning(
"Subprompt weights add up to zero. Discarding and using even weights instead."
)
equal_weight = 1 / max(len(parsed_prompts), 1)

View File

@ -1,4 +1,4 @@
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
class Restoration:
def __init__(self) -> None:
@ -10,17 +10,17 @@ class Restoration:
# Load GFPGAN
gfpgan = self.load_gfpgan(gfpgan_model_path)
if gfpgan.gfpgan_model_exists:
log.info("GFPGAN Initialized")
logger.info("GFPGAN Initialized")
else:
log.info("GFPGAN Disabled")
logger.info("GFPGAN Disabled")
gfpgan = None
# Load CodeFormer
codeformer = self.load_codeformer()
if codeformer.codeformer_model_exists:
log.info("CodeFormer Initialized")
logger.info("CodeFormer Initialized")
else:
log.info("CodeFormer Disabled")
logger.info("CodeFormer Disabled")
codeformer = None
return gfpgan, codeformer
@ -41,5 +41,5 @@ class Restoration:
from .realesrgan import ESRGAN
esrgan = ESRGAN(esrgan_bg_tile)
log.info("ESRGAN Initialized")
logger.info("ESRGAN Initialized")
return esrgan

View File

@ -5,7 +5,7 @@ import warnings
import numpy as np
import torch
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from ..globals import Globals
pretrained_model_url = (
@ -24,12 +24,12 @@ class CodeFormerRestoration:
self.codeformer_model_exists = os.path.isfile(self.model_path)
if not self.codeformer_model_exists:
log.error("NOT FOUND: CodeFormer model not found at " + self.model_path)
logger.error("NOT FOUND: CodeFormer model not found at " + self.model_path)
sys.path.append(os.path.abspath(codeformer_dir))
def process(self, image, strength, device, seed=None, fidelity=0.75):
if seed is not None:
log.info(f"CodeFormer - Restoring Faces for image seed:{seed}")
logger.info(f"CodeFormer - Restoring Faces for image seed:{seed}")
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
@ -98,7 +98,7 @@ class CodeFormerRestoration:
del output
torch.cuda.empty_cache()
except RuntimeError as error:
log.error(f"Failed inference for CodeFormer: {error}.")
logger.error(f"Failed inference for CodeFormer: {error}.")
restored_face = cropped_face
restored_face = restored_face.astype("uint8")

View File

@ -6,7 +6,7 @@ import numpy as np
import torch
from PIL import Image
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import Globals
class GFPGAN:
@ -19,7 +19,7 @@ class GFPGAN:
self.gfpgan_model_exists = os.path.isfile(self.model_path)
if not self.gfpgan_model_exists:
log.error("NOT FOUND: GFPGAN model not found at " + self.model_path)
logger.error("NOT FOUND: GFPGAN model not found at " + self.model_path)
return None
def model_exists(self):
@ -27,7 +27,7 @@ class GFPGAN:
def process(self, image, strength: float, seed: str = None):
if seed is not None:
log.info(f"GFPGAN - Restoring Faces for image seed:{seed}")
logger.info(f"GFPGAN - Restoring Faces for image seed:{seed}")
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
@ -47,13 +47,13 @@ class GFPGAN:
except Exception:
import traceback
log.error("Error loading GFPGAN:", file=sys.stderr)
logger.error("Error loading GFPGAN:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
os.chdir(cwd)
if self.gfpgan is None:
log.warning("WARNING: GFPGAN not initialized.")
log.warning(
logger.warning("WARNING: GFPGAN not initialized.")
logger.warning(
f"Download https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth to {self.model_path}"
)

View File

@ -1,7 +1,7 @@
import math
from PIL import Image
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
class Outcrop(object):
def __init__(
@ -82,7 +82,7 @@ class Outcrop(object):
pixels = extents[direction]
# round pixels up to the nearest 64
pixels = math.ceil(pixels / 64) * 64
log.info(f"extending image {direction}ward by {pixels} pixels")
logger.info(f"extending image {direction}ward by {pixels} pixels")
image = self._rotate(image, direction)
image = self._extend(image, pixels)
image = self._rotate(image, direction, reverse=True)

View File

@ -6,7 +6,7 @@ import torch
from PIL import Image
from PIL.Image import Image as ImageType
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import Globals
class ESRGAN:
@ -69,15 +69,15 @@ class ESRGAN:
import sys
import traceback
log.error("Error loading Real-ESRGAN:")
logger.error("Error loading Real-ESRGAN:")
print(traceback.format_exc(), file=sys.stderr)
if upsampler_scale == 0:
log.warning("Real-ESRGAN: Invalid scaling option. Image not upscaled.")
logger.warning("Real-ESRGAN: Invalid scaling option. Image not upscaled.")
return image
if seed is not None:
log.info(
logger.info(
f"Real-ESRGAN Upscaling seed:{seed}, scale:{upsampler_scale}x, tile:{self.bg_tile_size}, denoise:{denoise_str}"
)
# ESRGAN outputs images with partial transparency if given RGBA images; convert to RGB

View File

@ -14,7 +14,7 @@ from PIL import Image, ImageFilter
from transformers import AutoFeatureExtractor
import invokeai.assets.web as web_assets
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from .globals import global_cache_dir
from .util import CPU_DEVICE
@ -41,7 +41,7 @@ class SafetyChecker(object):
cache_dir=safety_model_path,
)
except Exception:
log.error(
logger.error(
"An error was encountered while installing the safety checker:"
)
print(traceback.format_exc())
@ -66,7 +66,7 @@ class SafetyChecker(object):
)
self.safety_checker.to(CPU_DEVICE) # offload
if has_nsfw_concept[0]:
log.warning(
logger.warning(
"An image with potential non-safe content has been detected. A blurred image will be returned."
)
return self.blur(image)

View File

@ -17,7 +17,7 @@ from huggingface_hub import (
hf_hub_url,
)
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import Globals
@ -67,10 +67,10 @@ class HuggingFaceConceptsLibrary(object):
# when init, add all in dir. when not init, add only concepts added between init and now
self.concept_list.extend(list(local_concepts_to_add))
except Exception as e:
log.warning(
logger.warning(
f"Hugging Face textual inversion concepts libraries could not be loaded. The error was {str(e)}."
)
log.warning(
logger.warning(
"You may load .bin and .pt file(s) manually using the --embedding_directory argument."
)
return self.concept_list
@ -84,7 +84,7 @@ class HuggingFaceConceptsLibrary(object):
be downloaded.
"""
if not concept_name in self.list_concepts():
log.warning(
logger.warning(
f"{concept_name} is not a local embedding trigger, nor is it a HuggingFace concept. Generation will continue without the concept."
)
return None
@ -222,7 +222,7 @@ class HuggingFaceConceptsLibrary(object):
if chunk == 0:
bytes += total
log.info(f"Downloading {repo_id}...", end="")
logger.info(f"Downloading {repo_id}...", end="")
try:
for file in (
"README.md",
@ -236,22 +236,22 @@ class HuggingFaceConceptsLibrary(object):
)
except ul_error.HTTPError as e:
if e.code == 404:
log.warning(
logger.warning(
f"Concept {concept_name} is not known to the Hugging Face library. Generation will continue without the concept."
)
else:
log.warning(
logger.warning(
f"Failed to download {concept_name}/{file} ({str(e)}. Generation will continue without the concept.)"
)
os.rmdir(dest)
return False
except ul_error.URLError as e:
log.error(
logger.error(
f"an error occurred while downloading {concept_name}: {str(e)}. This may reflect a network issue. Generation will continue without the concept."
)
os.rmdir(dest)
return False
log.info("...{:.2f}Kb".format(bytes / 1024))
logger.info("...{:.2f}Kb".format(bytes / 1024))
return succeeded
def _concept_id(self, concept_name: str) -> str:

View File

@ -13,7 +13,7 @@ from compel.cross_attention_control import Arguments
from diffusers.models.attention_processor import AttentionProcessor
from torch import nn
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from ...util import torch_dtype
class CrossAttentionType(enum.Enum):
@ -421,7 +421,7 @@ def get_cross_attention_modules(
expected_count = 16
if cross_attention_modules_in_model_count != expected_count:
# non-fatal error but .swap() won't work.
log.error(
logger.error(
f"Error! CrossAttentionControl found an unexpected number of {cross_attention_class} modules in the model "
+ f"(expected {expected_count}, found {cross_attention_modules_in_model_count}). Either monkey-patching failed "
+ "or some assumption has changed about the structure of the model itself. Please fix the monkey-patching, "

View File

@ -8,7 +8,7 @@ import torch
from diffusers.models.attention_processor import AttentionProcessor
from typing_extensions import TypeAlias
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import Globals
from .cross_attention_control import (
@ -467,13 +467,13 @@ class InvokeAIDiffuserComponent:
outside = torch.count_nonzero(
(latents < -current_threshold) | (latents > current_threshold)
)
log.info(
logger.info(
f"Threshold: %={percent_through} threshold={current_threshold:.3f} (of {threshold:.3f})"
)
log.debug(
logger.debug(
f"min, mean, max = {minval:.3f}, {mean:.3f}, {maxval:.3f}\tstd={std}"
)
log.debug(
logger.debug(
f"{outside / latents.numel() * 100:.2f}% values outside threshold"
)
@ -501,10 +501,10 @@ class InvokeAIDiffuserComponent:
)
if self.debug_thresholding:
log.debug(
logger.debug(
f"min, , max = {minval:.3f}, , {maxval:.3f}\t(scaled by {scale})"
)
log.debug(
logger.debug(
f"{num_altered / latents.numel() * 100:.2f}% values altered"
)

View File

@ -10,7 +10,7 @@ from torchvision.utils import make_grid
# import matplotlib.pyplot as plt # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
@ -191,7 +191,7 @@ def mkdirs(paths):
def mkdir_and_rename(path):
if os.path.exists(path):
new_name = path + "_archived_" + get_timestamp()
log.error("Path already exists. Rename it to [{:s}]".format(new_name))
logger.error("Path already exists. Rename it to [{:s}]".format(new_name))
os.replace(path, new_name)
os.makedirs(path)

View File

@ -10,7 +10,7 @@ from compel.embeddings_provider import BaseTextualInversionManager
from picklescan.scanner import scan_file_path
from transformers import CLIPTextModel, CLIPTokenizer
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from .concepts_lib import HuggingFaceConceptsLibrary
@dataclass
@ -60,12 +60,12 @@ class TextualInversionManager(BaseTextualInversionManager):
or self.has_textual_inversion_for_trigger_string(concept_name)
or self.has_textual_inversion_for_trigger_string(f"<{concept_name}>")
): # in case a token with literal angle brackets encountered
log.info(f"Loaded local embedding for trigger {concept_name}")
logger.info(f"Loaded local embedding for trigger {concept_name}")
continue
bin_file = self.hf_concepts_library.get_concept_model_path(concept_name)
if not bin_file:
continue
log.info(f"Loaded remote embedding for trigger {concept_name}")
logger.info(f"Loaded remote embedding for trigger {concept_name}")
self.load_textual_inversion(bin_file)
self.hf_concepts_library.concepts_loaded[concept_name] = True
@ -86,7 +86,7 @@ class TextualInversionManager(BaseTextualInversionManager):
embedding_list = self._parse_embedding(str(ckpt_path))
for embedding_info in embedding_list:
if (self.text_encoder.get_input_embeddings().weight.data[0].shape[0] != embedding_info.token_dim):
log.warning(
logger.warning(
f"Notice: {ckpt_path.parents[0].name}/{ckpt_path.name} was trained on a model with an incompatible token dimension: {self.text_encoder.get_input_embeddings().weight.data[0].shape[0]} vs {embedding_info.token_dim}."
)
continue
@ -106,7 +106,7 @@ class TextualInversionManager(BaseTextualInversionManager):
if ckpt_path.name == "learned_embeds.bin"
else f"<{ckpt_path.stem}>"
)
log.info(
logger.info(
f"{sourcefile}: Trigger token '{trigger_str}' is already claimed by '{self.trigger_to_sourcefile[trigger_str]}'. Trigger this concept with {replacement_trigger_str}"
)
trigger_str = replacement_trigger_str
@ -121,8 +121,8 @@ class TextualInversionManager(BaseTextualInversionManager):
self.trigger_to_sourcefile[trigger_str] = sourcefile
except ValueError as e:
log.debug(f'Ignoring incompatible embedding {embedding_info["name"]}')
log.debug(f"The error was {str(e)}")
logger.debug(f'Ignoring incompatible embedding {embedding_info["name"]}')
logger.debug(f"The error was {str(e)}")
def _add_textual_inversion(
self, trigger_str, embedding, defer_injecting_tokens=False
@ -134,7 +134,7 @@ class TextualInversionManager(BaseTextualInversionManager):
:return: The token id for the added embedding, either existing or newly-added.
"""
if trigger_str in [ti.trigger_string for ti in self.textual_inversions]:
log.warning(
logger.warning(
f"TextualInversionManager refusing to overwrite already-loaded token '{trigger_str}'"
)
return
@ -156,10 +156,10 @@ class TextualInversionManager(BaseTextualInversionManager):
except ValueError as e:
if str(e).startswith("Warning"):
log.warning(f"{str(e)}")
logger.warning(f"{str(e)}")
else:
traceback.print_exc()
log.error(
logger.error(
f"TextualInversionManager was unable to add a textual inversion with trigger string {trigger_str}."
)
raise
@ -220,16 +220,16 @@ class TextualInversionManager(BaseTextualInversionManager):
for ti in self.textual_inversions:
if ti.trigger_token_id is None and ti.trigger_string in prompt_string:
if ti.embedding_vector_length > 1:
log.info(
logger.info(
f"Preparing tokens for textual inversion {ti.trigger_string}..."
)
try:
self._inject_tokens_and_assign_embeddings(ti)
except ValueError as e:
log.debug(
logger.debug(
f"Ignoring incompatible embedding trigger {ti.trigger_string}"
)
log.debug(f"The error was {str(e)}")
logger.debug(f"The error was {str(e)}")
continue
injected_token_ids.append(ti.trigger_token_id)
injected_token_ids.extend(ti.pad_token_ids)
@ -307,16 +307,16 @@ class TextualInversionManager(BaseTextualInversionManager):
if suffix in [".pt",".ckpt",".bin"]:
scan_result = scan_file_path(embedding_file)
if scan_result.infected_files > 0:
log.critical(
logger.critical(
f"Security Issues Found in Model: {scan_result.issues_count}"
)
log.critical("For your safety, InvokeAI will not load this embed.")
logger.critical("For your safety, InvokeAI will not load this embed.")
return list()
ckpt = torch.load(embedding_file,map_location="cpu")
else:
ckpt = safetensors.torch.load_file(embedding_file)
except Exception as e:
log.warning(f"Notice: unrecognized embedding file format: {embedding_file}: {e}")
logger.warning(f"Notice: unrecognized embedding file format: {embedding_file}: {e}")
return list()
# try to figure out what kind of embedding file it is and parse accordingly
@ -335,7 +335,7 @@ class TextualInversionManager(BaseTextualInversionManager):
def _parse_embedding_v1(self, embedding_ckpt: dict, file_path: str)->List[EmbeddingInfo]:
basename = Path(file_path).stem
log.debug(f'Loading v1 embedding file: {basename}')
logger.debug(f'Loading v1 embedding file: {basename}')
embeddings = list()
token_counter = -1
@ -366,7 +366,7 @@ class TextualInversionManager(BaseTextualInversionManager):
This handles embedding .pt file variant #2.
"""
basename = Path(file_path).stem
log.debug(f'Loading v2 embedding file: {basename}')
logger.debug(f'Loading v2 embedding file: {basename}')
embeddings = list()
if isinstance(
@ -385,7 +385,7 @@ class TextualInversionManager(BaseTextualInversionManager):
)
embeddings.append(embedding_info)
else:
log.warning(f"{basename}: Unrecognized embedding format")
logger.warning(f"{basename}: Unrecognized embedding format")
return embeddings
@ -394,7 +394,7 @@ class TextualInversionManager(BaseTextualInversionManager):
Parse 'version 3' of the .pt textual inversion embedding files.
"""
basename = Path(file_path).stem
log.debug(f'Loading v3 embedding file: {basename}')
logger.debug(f'Loading v3 embedding file: {basename}')
embedding = embedding_ckpt['emb_params']
embedding_info = EmbeddingInfo(
name = f'<{basename}>',
@ -412,11 +412,11 @@ class TextualInversionManager(BaseTextualInversionManager):
basename = Path(filepath).stem
short_path = Path(filepath).parents[0].name+'/'+Path(filepath).name
log.debug(f'Loading v4 embedding file: {short_path}')
logger.debug(f'Loading v4 embedding file: {short_path}')
embeddings = list()
if len(embedding_ckpt.keys()) == 0:
log.warning(f"Invalid embeddings file: {short_path}")
logger.warning(f"Invalid embeddings file: {short_path}")
else:
for token,embedding in embedding_ckpt.items():
embedding_info = EmbeddingInfo(

View File

@ -26,7 +26,7 @@ Console messages:
Another way:
import invokeai.backend.util.logging as ialog
ialog.debug('this is a debugging message')
import invokeai.backend.util.logging as ialogger
ialogger.debug('this is a debugging message')
"""
import logging
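
As a usage reminder for the docstring above, a minimal sketch of the call pattern after the rename (assuming the module exposes the debug/info/warning/error/critical helpers used throughout this diff):

    import invokeai.backend.util.logging as logger

    logger.debug("this is a debugging message")
    logger.info("an informational message")
    logger.warning("a warning")
    logger.error("an error")
    logger.critical("a critical error")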

View File

@ -18,7 +18,7 @@ import torch
from PIL import Image, ImageDraw, ImageFont
from tqdm import tqdm
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from .devices import torch_dtype
@ -39,7 +39,7 @@ def log_txt_as_img(wh, xc, size=10):
try:
draw.text((0, 0), lines, fill="black", font=font)
except UnicodeEncodeError:
log.warning("Cant encode string for logging. Skipping.")
logger.warning("Cant encode string for logging. Skipping.")
txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
txts.append(txt)
@ -81,7 +81,7 @@ def mean_flat(tensor):
def count_params(model, verbose=False):
total_params = sum(p.numel() for p in model.parameters())
if verbose:
log.debug(
logger.debug(
f"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params."
)
return total_params
@ -133,7 +133,7 @@ def parallel_data_prefetch(
raise ValueError("list expected but function got ndarray.")
elif isinstance(data, abc.Iterable):
if isinstance(data, dict):
log.warning(
logger.warning(
'"data" argument passed to parallel_data_prefetch is a dict: Using only its values and disregarding keys.'
)
data = list(data.values())
@ -176,7 +176,7 @@ def parallel_data_prefetch(
processes += [p]
# start processes
log.info("Start prefetching...")
logger.info("Start prefetching...")
import time
start = time.time()
@ -195,7 +195,7 @@ def parallel_data_prefetch(
gather_res[res[0]] = res[1]
except Exception as e:
log.error("Exception: ", e)
logger.error("Exception: ", e)
for p in processes:
p.terminate()
@ -203,7 +203,7 @@ def parallel_data_prefetch(
finally:
for p in processes:
p.join()
log.info(f"Prefetching complete. [{time.time() - start} sec.]")
logger.info(f"Prefetching complete. [{time.time() - start} sec.]")
if target_data_type == "ndarray":
if not isinstance(gather_res[0], np.ndarray):
@ -319,23 +319,23 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path
resp = requests.get(url, headers=header, stream=True) # new request with range
if exist_size > content_length:
log.warning("corrupt existing file found. re-downloading")
logger.warning("corrupt existing file found. re-downloading")
os.remove(dest)
exist_size = 0
if resp.status_code == 416 or exist_size == content_length:
log.warning(f"{dest}: complete file found. Skipping.")
logger.warning(f"{dest}: complete file found. Skipping.")
return dest
elif resp.status_code == 206 or exist_size > 0:
log.warning(f"{dest}: partial file found. Resuming...")
logger.warning(f"{dest}: partial file found. Resuming...")
elif resp.status_code != 200:
log.error(f"An error occurred during downloading {dest}: {resp.reason}")
logger.error(f"An error occurred during downloading {dest}: {resp.reason}")
else:
log.error(f"{dest}: Downloading...")
logger.error(f"{dest}: Downloading...")
try:
if content_length < 2000:
log.error(f"ERROR DOWNLOADING {url}: {resp.text}")
logger.error(f"ERROR DOWNLOADING {url}: {resp.text}")
return None
with open(dest, open_mode) as file, tqdm(
@ -350,7 +350,7 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path
size = file.write(data)
bar.update(size)
except Exception as e:
log.error(f"An error occurred while downloading {dest}: {str(e)}")
logger.error(f"An error occurred while downloading {dest}: {str(e)}")
return None
return dest
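
A hypothetical caller of download_with_resume, using the signature shown in the hunk header above; the import path, URL, and destination are assumptions for illustration only:

    from pathlib import Path

    import invokeai.backend.util.logging as logger
    from invokeai.backend.util import download_with_resume  # import path assumed

    # Placeholder URL and destination; resuming partial files and skipping
    # complete ones follows the status-code handling shown in the diff above.
    dest = download_with_resume(
        "https://example.com/checkpoints/example.ckpt",
        Path("models/example.ckpt"),
    )
    if dest is None:
        logger.error("download failed")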

View File

@ -19,7 +19,7 @@ from PIL import Image
from PIL.Image import Image as ImageType
from werkzeug.utils import secure_filename
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
import invokeai.frontend.web.dist as frontend
from .. import Generate
@ -214,7 +214,7 @@ class InvokeAIWebServer:
self.load_socketio_listeners(self.socketio)
if args.gui:
log.info("Launching Invoke AI GUI")
logger.info("Launching Invoke AI GUI")
try:
from flaskwebgui import FlaskUI
@ -232,16 +232,16 @@ class InvokeAIWebServer:
sys.exit(0)
else:
useSSL = args.certfile or args.keyfile
log.info("Started Invoke AI Web Server")
logger.info("Started Invoke AI Web Server")
if self.host == "0.0.0.0":
log.info(
logger.info(
f"Point your browser at http{'s' if useSSL else ''}://localhost:{self.port} or use the host's DNS name or IP address."
)
else:
log.info(
logger.info(
"Default host address now 127.0.0.1 (localhost). Use --host 0.0.0.0 to bind any address."
)
log.info(
logger.info(
f"Point your browser at http{'s' if useSSL else ''}://{self.host}:{self.port}"
)
if not useSSL:
@ -274,7 +274,7 @@ class InvokeAIWebServer:
# path for thumbnail images
self.thumbnail_image_path = os.path.join(self.result_path, "thumbnails/")
# txt log
self.log_path = os.path.join(self.result_path, "invoke_log.txt")
self.log_path = os.path.join(self.result_path, "invoke_logger.txt")
# make all output paths
[
os.makedirs(path, exist_ok=True)
@ -291,7 +291,7 @@ class InvokeAIWebServer:
def load_socketio_listeners(self, socketio):
@socketio.on("requestSystemConfig")
def handle_request_capabilities():
log.info("System config requested")
logger.info("System config requested")
config = self.get_system_config()
config["model_list"] = self.generate.model_manager.list_models()
config["infill_methods"] = infill_methods()
@ -331,7 +331,7 @@ class InvokeAIWebServer:
if model_name in current_model_list:
update = True
log.info(f"Adding New Model: {model_name}")
logger.info(f"Adding New Model: {model_name}")
self.generate.model_manager.add_model(
model_name=model_name,
@ -349,14 +349,14 @@ class InvokeAIWebServer:
"update": update,
},
)
log.info(f"New Model Added: {model_name}")
logger.info(f"New Model Added: {model_name}")
except Exception as e:
self.handle_exceptions(e)
@socketio.on("deleteModel")
def handle_delete_model(model_name: str):
try:
log.info(f"Deleting Model: {model_name}")
logger.info(f"Deleting Model: {model_name}")
self.generate.model_manager.del_model(model_name)
self.generate.model_manager.commit(opt.conf)
updated_model_list = self.generate.model_manager.list_models()
@ -367,14 +367,14 @@ class InvokeAIWebServer:
"model_list": updated_model_list,
},
)
log.info(f"Model Deleted: {model_name}")
logger.info(f"Model Deleted: {model_name}")
except Exception as e:
self.handle_exceptions(e)
@socketio.on("requestModelChange")
def handle_set_model(model_name: str):
try:
log.info(f"Model change requested: {model_name}")
logger.info(f"Model change requested: {model_name}")
model = self.generate.set_model(model_name)
model_list = self.generate.model_manager.list_models()
if model is None:
@ -455,7 +455,7 @@ class InvokeAIWebServer:
"update": True,
},
)
log.info(f"Model Converted: {model_name}")
logger.info(f"Model Converted: {model_name}")
except Exception as e:
self.handle_exceptions(e)
@ -491,7 +491,7 @@ class InvokeAIWebServer:
if vae := self.generate.model_manager.config[models_to_merge[0]].get(
"vae", None
):
log.info(f"Using configured VAE assigned to {models_to_merge[0]}")
logger.info(f"Using configured VAE assigned to {models_to_merge[0]}")
merged_model_config.update(vae=vae)
self.generate.model_manager.import_diffuser_model(
@ -508,8 +508,8 @@ class InvokeAIWebServer:
"update": True,
},
)
log.info(f"Models Merged: {models_to_merge}")
log.info(f"New Model Added: {model_merge_info['merged_model_name']}")
logger.info(f"Models Merged: {models_to_merge}")
logger.info(f"New Model Added: {model_merge_info['merged_model_name']}")
except Exception as e:
self.handle_exceptions(e)
@ -699,7 +699,7 @@ class InvokeAIWebServer:
}
)
except Exception as e:
log.info(f"Unable to load {path}")
logger.info(f"Unable to load {path}")
socketio.emit(
"error", {"message": f"Unable to load {path}: {str(e)}"}
)
@ -736,9 +736,9 @@ class InvokeAIWebServer:
printable_parameters["init_mask"][:64] + "..."
)
log.info(f"Image Generation Parameters:\n\n{printable_parameters}\n")
log.info(f"ESRGAN Parameters: {esrgan_parameters}")
log.info(f"Facetool Parameters: {facetool_parameters}")
logger.info(f"Image Generation Parameters:\n\n{printable_parameters}\n")
logger.info(f"ESRGAN Parameters: {esrgan_parameters}")
logger.info(f"Facetool Parameters: {facetool_parameters}")
self.generate_images(
generation_parameters,
@ -751,7 +751,7 @@ class InvokeAIWebServer:
@socketio.on("runPostprocessing")
def handle_run_postprocessing(original_image, postprocessing_parameters):
try:
log.info(
logger.info(
f'Postprocessing requested for "{original_image["url"]}": {postprocessing_parameters}'
)
@ -862,14 +862,14 @@ class InvokeAIWebServer:
@socketio.on("cancel")
def handle_cancel():
log.info("Cancel processing requested")
logger.info("Cancel processing requested")
self.canceled.set()
# TODO: I think this needs a safety mechanism.
@socketio.on("deleteImage")
def handle_delete_image(url, thumbnail, uuid, category):
try:
log.info(f'Delete requested "{url}"')
logger.info(f'Delete requested "{url}"')
from send2trash import send2trash
path = self.get_image_path_from_url(url)
@ -1264,7 +1264,7 @@ class InvokeAIWebServer:
image, os.path.basename(path), self.thumbnail_image_path
)
log.info(f'Image generated: "{path}"\n')
logger.info(f'Image generated: "{path}"\n')
self.write_log_message(f'[Generated] "{path}": {command}')
if progress.total_iterations > progress.current_iteration:
@ -1330,7 +1330,7 @@ class InvokeAIWebServer:
except Exception as e:
# Clear the CUDA cache on an exception
self.empty_cuda_cache()
log.error(e)
logger.error(e)
self.handle_exceptions(e)
def empty_cuda_cache(self):
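
The body of empty_cuda_cache is not shown in this diff; a minimal sketch of what such a helper typically does (assumed implementation, not the project's code):

    import torch

    def empty_cuda_cache():
        # Return cached, unused GPU memory to the driver when CUDA is present.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()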

View File

@ -16,7 +16,7 @@ if sys.platform == "darwin":
import pyparsing # type: ignore
import invokeai.version as invokeai
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from ...backend import Generate, ModelManager
from ...backend.args import Args, dream_cmd_from_png, metadata_dumps, metadata_from_png
@ -70,7 +70,7 @@ def main():
# run any post-install patches needed
run_patches()
log.info(f"Internet connectivity is {Globals.internet_available}")
logger.info(f"Internet connectivity is {Globals.internet_available}")
if not args.conf:
config_file = os.path.join(Globals.root, "configs", "models.yaml")
@ -79,8 +79,8 @@ def main():
opt, FileNotFoundError(f"The file {config_file} could not be found.")
)
log.info(f"{invokeai.__app_name__}, version {invokeai.__version__}")
log.info(f'InvokeAI runtime directory is "{Globals.root}"')
logger.info(f"{invokeai.__app_name__}, version {invokeai.__version__}")
logger.info(f'InvokeAI runtime directory is "{Globals.root}"')
# loading here to avoid long delays on startup
# these two lines prevent a horrible warning message from appearing
@ -122,7 +122,7 @@ def main():
else:
raise FileNotFoundError(f"{opt.infile} not found.")
except (FileNotFoundError, IOError) as e:
log.critical('Aborted',exc_info=True)
logger.critical('Aborted',exc_info=True)
sys.exit(-1)
# creating a Generate object:
@ -144,11 +144,11 @@ def main():
except (FileNotFoundError, TypeError, AssertionError) as e:
report_model_error(opt, e)
except (IOError, KeyError):
log.critical("Aborted",exc_info=True)
logger.critical("Aborted",exc_info=True)
sys.exit(-1)
if opt.seamless:
log.info("Changed to seamless tiling mode")
logger.info("Changed to seamless tiling mode")
# preload the model
try:
@ -181,7 +181,7 @@ def main():
f'\nGoodbye!\nYou can start InvokeAI again by running the "invoke.bat" (or "invoke.sh") script from {Globals.root}'
)
except Exception:
log.error("An error occurred",exc_info=True)
logger.error("An error occurred",exc_info=True)
# TODO: main_loop() has gotten busy. Needs to be refactored.
def main_loop(gen, opt):
@ -247,7 +247,7 @@ def main_loop(gen, opt):
if not opt.prompt:
oldargs = metadata_from_png(opt.init_img)
opt.prompt = oldargs.prompt
log.info(f'Retrieved old prompt "{opt.prompt}" from {opt.init_img}')
logger.info(f'Retrieved old prompt "{opt.prompt}" from {opt.init_img}')
except (OSError, AttributeError, KeyError):
pass
@ -264,9 +264,9 @@ def main_loop(gen, opt):
if opt.init_img is not None and re.match("^-\\d+$", opt.init_img):
try:
opt.init_img = last_results[int(opt.init_img)][0]
log.info(f"Reusing previous image {opt.init_img}")
logger.info(f"Reusing previous image {opt.init_img}")
except IndexError:
log.info(f"No previous initial image at position {opt.init_img} found")
logger.info(f"No previous initial image at position {opt.init_img} found")
opt.init_img = None
continue
@ -287,9 +287,9 @@ def main_loop(gen, opt):
if opt.seed is not None and opt.seed < 0 and operation != "postprocess":
try:
opt.seed = last_results[opt.seed][1]
log.info(f"Reusing previous seed {opt.seed}")
logger.info(f"Reusing previous seed {opt.seed}")
except IndexError:
log.info(f"No previous seed at position {opt.seed} found")
logger.info(f"No previous seed at position {opt.seed} found")
opt.seed = None
continue
@ -308,7 +308,7 @@ def main_loop(gen, opt):
subdir = subdir[: (path_max - 39 - len(os.path.abspath(opt.outdir)))]
current_outdir = os.path.join(opt.outdir, subdir)
log.info('Writing files to directory: "' + current_outdir + '"')
logger.info('Writing files to directory: "' + current_outdir + '"')
# make sure the output directory exists
if not os.path.exists(current_outdir):
@ -438,13 +438,13 @@ def main_loop(gen, opt):
**vars(opt),
)
except (PromptParser.ParsingException, pyparsing.ParseException):
log.error("An error occurred while processing your prompt",exc_info=True)
logger.error("An error occurred while processing your prompt",exc_info=True)
elif operation == "postprocess":
log.info(f"fixing {opt.prompt}")
logger.info(f"fixing {opt.prompt}")
opt.last_operation = do_postprocess(gen, opt, image_writer)
elif operation == "mask":
log.info(f"generating masks from {opt.prompt}")
logger.info(f"generating masks from {opt.prompt}")
do_textmask(gen, opt, image_writer)
if opt.grid and len(grid_images) > 0:
@ -468,11 +468,11 @@ def main_loop(gen, opt):
results = [[path, formatted_dream_prompt]]
except AssertionError as e:
log.error(e)
logger.error(e)
continue
except OSError as e:
log.error(e)
logger.error(e)
continue
print("Outputs:")
@ -511,7 +511,7 @@ def do_command(command: str, gen, opt: Args, completer) -> tuple:
gen.set_model(model_name)
add_embedding_terms(gen, completer)
except KeyError as e:
log.error(e)
logger.error(e)
except Exception as e:
report_model_error(opt, e)
completer.add_history(command)
@ -525,7 +525,7 @@ def do_command(command: str, gen, opt: Args, completer) -> tuple:
elif command.startswith("!import"):
path = shlex.split(command)
if len(path) < 2:
log.warning(
logger.warning(
"please provide (1) a URL to a .ckpt file to import; (2) a local path to a .ckpt file; or (3) a diffusers repository id in the form stabilityai/stable-diffusion-2-1"
)
else:
@ -539,7 +539,7 @@ def do_command(command: str, gen, opt: Args, completer) -> tuple:
elif command.startswith(("!convert", "!optimize")):
path = shlex.split(command)
if len(path) < 2:
log.warning("please provide the path to a .ckpt or .safetensors model")
logger.warning("please provide the path to a .ckpt or .safetensors model")
else:
try:
convert_model(path[1], gen, opt, completer)
@ -551,7 +551,7 @@ def do_command(command: str, gen, opt: Args, completer) -> tuple:
elif command.startswith("!edit"):
path = shlex.split(command)
if len(path) < 2:
log.warning("please provide the name of a model")
logger.warning("please provide the name of a model")
else:
edit_model(path[1], gen, opt, completer)
completer.add_history(command)
@ -560,7 +560,7 @@ def do_command(command: str, gen, opt: Args, completer) -> tuple:
elif command.startswith("!del"):
path = shlex.split(command)
if len(path) < 2:
log.warning("please provide the name of a model")
logger.warning("please provide the name of a model")
else:
del_config(path[1], gen, opt, completer)
completer.add_history(command)
@ -641,7 +641,7 @@ def import_model(model_path: str, gen, opt, completer):
default_name = url_attachment_name(model_path)
default_name = Path(default_name).stem
except Exception:
log.warning(f"A problem occurred while assigning the name of the downloaded model",exc_info=True)
logger.warning(f"A problem occurred while assigning the name of the downloaded model",exc_info=True)
model_name, model_desc = _get_model_name_and_desc(
gen.model_manager,
completer,
@ -662,11 +662,11 @@ def import_model(model_path: str, gen, opt, completer):
model_config_file=config_file,
)
if not imported_name:
log.error("Aborting import.")
logger.error("Aborting import.")
return
if not _verify_load(imported_name, gen):
log.error("model failed to load. Discarding configuration entry")
logger.error("model failed to load. Discarding configuration entry")
gen.model_manager.del_model(imported_name)
return
if click.confirm("Make this the default model?", default=False):
@ -674,7 +674,7 @@ def import_model(model_path: str, gen, opt, completer):
gen.model_manager.commit(opt.conf)
completer.update_models(gen.model_manager.list_models())
log.info(f"{imported_name} successfully installed")
logger.info(f"{imported_name} successfully installed")
def _pick_configuration_file(completer)->Path:
print(
@ -718,21 +718,21 @@ Please select the type of this model:
return choice
def _verify_load(model_name: str, gen) -> bool:
log.info("Verifying that new model loads...")
logger.info("Verifying that new model loads...")
current_model = gen.model_name
try:
if not gen.set_model(model_name):
return
except Exception as e:
log.warning(f"model failed to load: {str(e)}")
log.warning(
logger.warning(f"model failed to load: {str(e)}")
logger.warning(
"** note that importing 2.X checkpoints is not supported. Please use !convert_model instead."
)
return False
if click.confirm("Keep model loaded?", default=True):
gen.set_model(model_name)
else:
log.info("Restoring previous model")
logger.info("Restoring previous model")
gen.set_model(current_model)
return True
@ -755,7 +755,7 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
ckpt_path = None
original_config_file = None
if model_name_or_path == gen.model_name:
log.warning("Can't convert the active model. !switch to another model first. **")
logger.warning("Can't convert the active model. !switch to another model first. **")
return
elif model_info := manager.model_info(model_name_or_path):
if "weights" in model_info:
@ -765,7 +765,7 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
model_description = model_info["description"]
vae_path = model_info.get("vae")
else:
log.warning(f"{model_name_or_path} is not a legacy .ckpt weights file")
logger.warning(f"{model_name_or_path} is not a legacy .ckpt weights file")
return
model_name = manager.convert_and_import(
ckpt_path,
@ -786,16 +786,16 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
manager.commit(opt.conf)
if click.confirm(f"Delete the original .ckpt file at {ckpt_path}?", default=False):
ckpt_path.unlink(missing_ok=True)
log.warning(f"{ckpt_path} deleted")
logger.warning(f"{ckpt_path} deleted")
def del_config(model_name: str, gen, opt, completer):
current_model = gen.model_name
if model_name == current_model:
log.warning("Can't delete active model. !switch to another model first. **")
logger.warning("Can't delete active model. !switch to another model first. **")
return
if model_name not in gen.model_manager.config:
log.warning(f"Unknown model {model_name}")
logger.warning(f"Unknown model {model_name}")
return
if not click.confirm(
@ -808,17 +808,17 @@ def del_config(model_name: str, gen, opt, completer):
)
gen.model_manager.del_model(model_name, delete_files=delete_completely)
gen.model_manager.commit(opt.conf)
log.warning(f"{model_name} deleted")
logger.warning(f"{model_name} deleted")
completer.update_models(gen.model_manager.list_models())
def edit_model(model_name: str, gen, opt, completer):
manager = gen.model_manager
if not (info := manager.model_info(model_name)):
log.warning(f"** Unknown model {model_name}")
logger.warning(f"** Unknown model {model_name}")
return
print()
log.info(f"Editing model {model_name} from configuration file {opt.conf}")
logger.info(f"Editing model {model_name} from configuration file {opt.conf}")
new_name = _get_model_name(manager.list_models(), completer, model_name)
for attribute in info.keys():
@ -856,7 +856,7 @@ def edit_model(model_name: str, gen, opt, completer):
manager.set_default_model(new_name)
manager.commit(opt.conf)
completer.update_models(manager.list_models())
log.info("Model successfully updated")
logger.info("Model successfully updated")
def _get_model_name(existing_names, completer, default_name: str = "") -> str:
@ -867,11 +867,11 @@ def _get_model_name(existing_names, completer, default_name: str = "") -> str:
if len(model_name) == 0:
model_name = default_name
if not re.match("^[\w._+:/-]+$", model_name):
log.warning(
logger.warning(
'model name must contain only words, digits and the characters "._+:/-" **'
)
elif model_name != default_name and model_name in existing_names:
log.warning(f"the name {model_name} is already in use. Pick another.")
logger.warning(f"the name {model_name} is already in use. Pick another.")
else:
done = True
return model_name
@ -938,10 +938,10 @@ def do_postprocess(gen, opt, callback):
opt=opt,
)
except OSError:
log.error(f"{file_path}: file could not be read",exc_info=True)
logger.error(f"{file_path}: file could not be read",exc_info=True)
return
except (KeyError, AttributeError):
log.error(f"an error occurred while applying the {tool} postprocessor",exc_info=True)
logger.error(f"an error occurred while applying the {tool} postprocessor",exc_info=True)
return
return opt.last_operation
@ -996,12 +996,12 @@ def prepare_image_metadata(
try:
filename = opt.fnformat.format(**wildcards)
except KeyError as e:
log.error(
logger.error(
f"The filename format contains an unknown key '{e.args[0]}'. Will use {{prefix}}.{{seed}}.png' instead"
)
filename = f"{prefix}.{seed}.png"
except IndexError:
log.error(
logger.error(
"The filename format is broken or complete. Will use '{prefix}.{seed}.png' instead"
)
filename = f"{prefix}.{seed}.png"
@ -1091,14 +1091,14 @@ def split_variations(variations_string) -> list:
for part in variations_string.split(","):
seed_and_weight = part.split(":")
if len(seed_and_weight) != 2:
log.warning(f'Could not parse with_variation part "{part}"')
logger.warning(f'Could not parse with_variation part "{part}"')
broken = True
break
try:
seed = int(seed_and_weight[0])
weight = float(seed_and_weight[1])
except ValueError:
log.warning(f'Could not parse with_variation part "{part}"')
logger.warning(f'Could not parse with_variation part "{part}"')
broken = True
break
parts.append([seed, weight])
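
The with_variation string parsed above is a comma-separated list of seed:weight pairs; a self-contained sketch of the same parsing with hypothetical values:

    # Hypothetical input in the format expected by the loop above.
    variations_string = "31415:0.4,27182:0.1"

    parts = []
    for part in variations_string.split(","):
        seed_and_weight = part.split(":")
        if len(seed_and_weight) != 2:
            raise ValueError(f'Could not parse with_variation part "{part}"')
        parts.append([int(seed_and_weight[0]), float(seed_and_weight[1])])

    print(parts)  # [[31415, 0.4], [27182, 0.1]]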
@ -1122,23 +1122,23 @@ def load_face_restoration(opt):
opt.gfpgan_model_path
)
else:
log.info("Face restoration disabled")
logger.info("Face restoration disabled")
if opt.esrgan:
esrgan = restoration.load_esrgan(opt.esrgan_bg_tile)
else:
log.info("Upscaling disabled")
logger.info("Upscaling disabled")
else:
log.info("Face restoration and upscaling disabled")
logger.info("Face restoration and upscaling disabled")
except (ModuleNotFoundError, ImportError):
print(traceback.format_exc(), file=sys.stderr)
log.info("You may need to install the ESRGAN and/or GFPGAN modules")
logger.info("You may need to install the ESRGAN and/or GFPGAN modules")
return gfpgan, codeformer, esrgan
def make_step_callback(gen, opt, prefix):
destination = os.path.join(opt.outdir, "intermediates", prefix)
os.makedirs(destination, exist_ok=True)
log.info(f"Intermediate images will be written into {destination}")
logger.info(f"Intermediate images will be written into {destination}")
def callback(state: PipelineIntermediateState):
latents = state.latents
@ -1180,11 +1180,11 @@ def retrieve_dream_command(opt, command, completer):
try:
cmd = dream_cmd_from_png(path)
except OSError:
log.error(f"{tokens[0]}: file could not be read")
logger.error(f"{tokens[0]}: file could not be read")
except (KeyError, AttributeError, IndexError):
log.error(f"{tokens[0]}: file has no metadata")
logger.error(f"{tokens[0]}: file has no metadata")
except:
log.error(f"{tokens[0]}: file could not be processed")
logger.error(f"{tokens[0]}: file could not be processed")
if len(cmd) > 0:
completer.set_line(cmd)
@ -1193,7 +1193,7 @@ def write_commands(opt, file_path: str, outfilepath: str):
try:
paths = sorted(list(Path(dir).glob(basename)))
except ValueError:
log.error(f'"{basename}": unacceptable pattern')
logger.error(f'"{basename}": unacceptable pattern')
return
commands = []
@ -1202,9 +1202,9 @@ def write_commands(opt, file_path: str, outfilepath: str):
try:
cmd = dream_cmd_from_png(path)
except (KeyError, AttributeError, IndexError):
log.error(f"{path}: file has no metadata")
logger.error(f"{path}: file has no metadata")
except:
log.error(f"{path}: file could not be processed")
logger.error(f"{path}: file could not be processed")
if cmd:
commands.append(f"# {path}")
commands.append(cmd)
@ -1214,17 +1214,17 @@ def write_commands(opt, file_path: str, outfilepath: str):
outfilepath = os.path.join(opt.outdir, basename)
with open(outfilepath, "w", encoding="utf-8") as f:
f.write("\n".join(commands))
log.info(f"File {outfilepath} with commands created")
logger.info(f"File {outfilepath} with commands created")
def report_model_error(opt: Namespace, e: Exception):
log.warning(f'An error occurred while attempting to initialize the model: "{str(e)}"')
log.warning(
logger.warning(f'An error occurred while attempting to initialize the model: "{str(e)}"')
logger.warning(
"This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models."
)
yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE")
if yes_to_all:
log.warning(
logger.warning(
"Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE"
)
else:
@ -1234,7 +1234,7 @@ def report_model_error(opt: Namespace, e: Exception):
):
return
log.info("invokeai-configure is launching....\n")
logger.info("invokeai-configure is launching....\n")
# Match arguments that were set on the CLI
# only the arguments accepted by the configuration script are parsed
@ -1251,7 +1251,7 @@ def report_model_error(opt: Namespace, e: Exception):
from ..install import invokeai_configure
invokeai_configure()
log.warning("InvokeAI will now restart")
logger.warning("InvokeAI will now restart")
sys.argv = previous_args
main() # would rather do a os.exec(), but doesn't exist?
sys.exit(0)

View File

@ -22,7 +22,7 @@ import torch
from npyscreen import widget
from omegaconf import OmegaConf
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import Globals, global_config_dir
from ...backend.config.model_install_backend import (
@ -456,7 +456,7 @@ def main():
Globals.root = os.path.expanduser(get_root(opt.root) or "")
if not global_config_dir().exists():
log.info(
logger.info(
"Your InvokeAI root directory is not set up. Calling invokeai-configure."
)
from invokeai.frontend.install import invokeai_configure
@ -467,17 +467,17 @@ def main():
try:
select_and_download_models(opt)
except AssertionError as e:
log.error(e)
logger.error(e)
sys.exit(-1)
except KeyboardInterrupt:
log.info("Goodbye! Come back soon.")
logger.info("Goodbye! Come back soon.")
except widget.NotEnoughSpaceForWidget as e:
if str(e).startswith("Height of 1 allocated"):
log.error(
logger.error(
"Insufficient vertical space for the interface. Please make your window taller and try again"
)
elif str(e).startswith("addwstr"):
log.error(
logger.error(
"Insufficient horizontal space for the interface. Please make your window wider and try again."
)

View File

@ -28,7 +28,7 @@ from ...backend.globals import (
global_set_root,
)
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from ...backend.model_management import ModelManager
from ...frontend.install.widgets import FloatTitleSlider
@ -115,7 +115,7 @@ def merge_diffusion_models_and_commit(
model_name=merged_model_name, description=f'Merge of models {", ".join(models)}'
)
if vae := model_manager.config[models[0]].get("vae", None):
log.info(f"Using configured VAE assigned to {models[0]}")
logger.info(f"Using configured VAE assigned to {models[0]}")
import_args.update(vae=vae)
model_manager.import_diffuser_model(dump_path, **import_args)
model_manager.commit(config_file)
@ -414,7 +414,7 @@ def run_gui(args: Namespace):
args = mergeapp.merge_arguments
merge_diffusion_models_and_commit(**args)
log.info(f'Models merged into new model: "{args["merged_model_name"]}".')
logger.info(f'Models merged into new model: "{args["merged_model_name"]}".')
def run_cli(args: Namespace):
@ -425,7 +425,7 @@ def run_cli(args: Namespace):
if not args.merged_model_name:
args.merged_model_name = "+".join(args.models)
log.info(
logger.info(
f'No --merged_model_name provided. Defaulting to "{args.merged_model_name}"'
)
@ -435,7 +435,7 @@ def run_cli(args: Namespace):
), f'A model named "{args.merged_model_name}" already exists. Use --clobber to overwrite.'
merge_diffusion_models_and_commit(**vars(args))
log.info(f'Models merged into new model: "{args.merged_model_name}".')
logger.info(f'Models merged into new model: "{args.merged_model_name}".')
def main():
@ -455,16 +455,16 @@ def main():
run_cli(args)
except widget.NotEnoughSpaceForWidget as e:
if str(e).startswith("Height of 1 allocated"):
log.error(
logger.error(
"You need to have at least two diffusers models defined in models.yaml in order to merge"
)
else:
log.error(
logger.error(
"Not enough room for the user interface. Try making this window larger."
)
sys.exit(-1)
except Exception as e:
log.error(e)
logger.error(e)
sys.exit(-1)
except KeyboardInterrupt:
sys.exit(-1)

View File

@ -20,7 +20,7 @@ import npyscreen
from npyscreen import widget
from omegaconf import OmegaConf
import invokeai.backend.util.logging as log
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import Globals, global_set_root
from ...backend.training import do_textual_inversion_training, parse_args
@ -369,14 +369,14 @@ def copy_to_embeddings_folder(args: dict):
dest_dir_name = args["placeholder_token"].strip("<>")
destination = Path(Globals.root, "embeddings", dest_dir_name)
os.makedirs(destination, exist_ok=True)
log.info(f"Training completed. Copying learned_embeds.bin into {str(destination)}")
logger.info(f"Training completed. Copying learned_embeds.bin into {str(destination)}")
shutil.copy(source, destination)
if (
input("Delete training logs and intermediate checkpoints? [y] ") or "y"
).startswith(("y", "Y")):
shutil.rmtree(Path(args["output_dir"]))
else:
log.info(f'Keeping {args["output_dir"]}')
logger.info(f'Keeping {args["output_dir"]}')
def save_args(args: dict):
@ -423,10 +423,10 @@ def do_front_end(args: Namespace):
do_textual_inversion_training(**args)
copy_to_embeddings_folder(args)
except Exception as e:
log.error("An exception occurred during training. The exception was:")
log.error(str(e))
log.error("DETAILS:")
log.error(traceback.format_exc())
logger.error("An exception occurred during training. The exception was:")
logger.error(str(e))
logger.error("DETAILS:")
logger.error(traceback.format_exc())
def main():
@ -438,21 +438,21 @@ def main():
else:
do_textual_inversion_training(**vars(args))
except AssertionError as e:
log.error(e)
logger.error(e)
sys.exit(-1)
except KeyboardInterrupt:
pass
except (widget.NotEnoughSpaceForWidget, Exception) as e:
if str(e).startswith("Height of 1 allocated"):
log.error(
logger.error(
"You need to have at least one diffusers models defined in models.yaml in order to train"
)
elif str(e).startswith("addwstr"):
log.error(
logger.error(
"Not enough window space for the interface. Please make your window larger and try again."
)
else:
log.error(e)
logger.error(e)
sys.exit(-1)