replaced remaining print statements with log.*()
This commit is contained in:
parent
0b0e6fe448
commit
b164330e3c
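
The rewrite is mechanical across every file below: each print() loses its ad-hoc ">> " or "** " severity prefix, and the message is routed through the shared logger at the matching level (log.info for status, log.warning for recoverable conditions, log.error for failures). A minimal before/after sketch of the pattern; the model_name value is a placeholder, and the messages are taken from the hunks below:

    import invokeai.backend.util.logging as log

    model_name = "stable-diffusion-1.5"  # placeholder value for illustration

    # before: severity was encoded in a string prefix
    print(f">> Checking for model {model_name}...")

    # after: severity is carried by the log level, so the prefix is dropped
    log.info(f"Checking for model {model_name}...")
    log.warning('Invalid command, use "help" to list commands')
    log.error(f"Model not found")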
@@ -3,8 +3,9 @@
 import os
 from argparse import Namespace
 
-from ..services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
+import invokeai.backend.util.logging as log
 
+from ..services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
 from ...backend import Globals
 from ..services.model_manager_initializer import get_model_manager
 from ..services.restoration_services import RestorationServices
@@ -47,8 +48,7 @@ class ApiDependencies:
         Globals.disable_xformers = not config.xformers
         Globals.ckpt_convert = config.ckpt_convert
 
-        # TODO: Use a logger
-        print(f">> Internet connectivity is {Globals.internet_available}")
+        log.info(f"Internet connectivity is {Globals.internet_available}")
 
         events = FastAPIEventService(event_handler_id)
 
@@ -4,6 +4,7 @@ import shutil
 import asyncio
 from typing import Annotated, Any, List, Literal, Optional, Union
 
+import invokeai.backend.util.logging as log
 from fastapi.routing import APIRouter, HTTPException
 from pydantic import BaseModel, Field, parse_obj_as
 from pathlib import Path
@@ -115,16 +116,16 @@ async def delete_model(model_name: str) -> None:
     model_exists = model_name in model_names
 
     # check if model exists
-    print(f">> Checking for model {model_name}...")
+    log.info(f"Checking for model {model_name}...")
 
     if model_exists:
-        print(f">> Deleting Model: {model_name}")
+        log.info(f"Deleting Model: {model_name}")
         ApiDependencies.invoker.services.model_manager.del_model(model_name, delete_files=True)
-        print(f">> Model Deleted: {model_name}")
+        log.info(f"Model Deleted: {model_name}")
         raise HTTPException(status_code=204, detail=f"Model '{model_name}' deleted successfully")
 
     else:
-        print(f">> Model not found")
+        log.error(f"Model not found")
         raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found")
 
 
@@ -248,4 +249,4 @@ async def delete_model(model_name: str) -> None:
 #        )
 #        print(f">> Models Merged: {models_to_merge}")
 #        print(f">> New Model Added: {model_merge_info['merged_model_name']}")
 #    except Exception as e:
@@ -7,6 +7,7 @@ from pydantic import BaseModel, Field
 import networkx as nx
 import matplotlib.pyplot as plt
 
+import invokeai.backend.util.logging as log
 from ..models.image import ImageField
 from ..services.graph import GraphExecutionState
 from ..services.invoker import Invoker
@@ -183,7 +184,7 @@ class HistoryCommand(BaseCommand):
         for i in range(min(self.count, len(history))):
             entry_id = history[-1 - i]
             entry = context.get_session().graph.get_node(entry_id)
-            print(f"{entry_id}: {get_invocation_command(entry)}")
+            log.info(f"{entry_id}: {get_invocation_command(entry)}")
 
 
 class SetDefaultCommand(BaseCommand):
@@ -10,6 +10,7 @@ import shlex
 from pathlib import Path
 from typing import List, Dict, Literal, get_args, get_type_hints, get_origin
 
+import invokeai.backend.util.logging as log
 from ...backend import ModelManager, Globals
 from ..invocations.baseinvocation import BaseInvocation
 from .commands import BaseCommand
@@ -160,8 +161,8 @@ def set_autocompleter(model_manager: ModelManager) -> Completer:
         pass
     except OSError:  # file likely corrupted
         newname = f"{histfile}.old"
-        print(
-            f"## Your history file {histfile} couldn't be loaded and may be corrupted. Renaming it to {newname}"
+        log.error(
+            f"Your history file {histfile} couldn't be loaded and may be corrupted. Renaming it to {newname}"
         )
         histfile.replace(Path(newname))
     atexit.register(readline.write_history_file, histfile)
@@ -13,6 +13,7 @@ from typing import (
 from pydantic import BaseModel
 from pydantic.fields import Field
 
+import invokeai.backend.util.logging as log
 from .services.latent_storage import DiskLatentsStorage, ForwardCacheLatentsStorage
 
 from ..backend import Args
@@ -125,7 +126,7 @@ def invoke_all(context: CliContext):
     # Print any errors
     if context.session.has_error():
         for n in context.session.errors:
-            print(
+            log.error(
                 f"Error in node {n} (source node {context.session.prepared_source_mapping[n]}): {context.session.errors[n]}"
             )
 
@@ -279,12 +280,12 @@ def invoke_cli():
             invoke_all(context)
 
         except InvalidArgs:
-            print('Invalid command, use "help" to list commands')
+            log.warning('Invalid command, use "help" to list commands')
            continue
 
         except SessionError:
             # Start a new session
-            print("Session error: creating a new session")
+            log.warning("Session error: creating a new session")
             context.session = context.invoker.create_execution_state()
 
         except ExitCli:
@@ -1,3 +1,4 @@
+import invokeai.backend.util.logging as log
 from invokeai.app.invocations.baseinvocation import InvocationContext
 from invokeai.backend.model_management.model_manager import ModelManager
 
@@ -7,5 +8,5 @@ def choose_model(model_manager: ModelManager, model_name: str):
     if model_manager.valid_model(model_name):
         return model_manager.get_model(model_name)
     else:
-        print(f"* Warning: '{model_name}' is not a valid model name. Using default model instead.")
+        log.warning(f"'{model_name}' is not a valid model name. Using default model instead.")
         return model_manager.get_model()
@@ -7,6 +7,7 @@ from omegaconf import OmegaConf
 from pathlib import Path
 
 import invokeai.version
+import invokeai.backend.util.logging as log
 from ...backend import ModelManager
 from ...backend.util import choose_precision, choose_torch_device
 from ...backend import Globals
@@ -20,8 +21,8 @@ def get_model_manager(config: Args) -> ModelManager:
             config, FileNotFoundError(f"The file {config_file} could not be found.")
         )
 
-    print(f">> {invokeai.version.__app_name__}, version {invokeai.version.__version__}")
-    print(f'>> InvokeAI runtime directory is "{Globals.root}"')
+    log.info(f"{invokeai.version.__app_name__}, version {invokeai.version.__version__}")
+    log.info(f'InvokeAI runtime directory is "{Globals.root}"')
 
     # these two lines prevent a horrible warning message from appearing
     # when the frozen CLIP tokenizer is imported
@@ -66,7 +67,7 @@ def get_model_manager(config: Args) -> ModelManager:
     except (FileNotFoundError, TypeError, AssertionError) as e:
         report_model_error(config, e)
     except (IOError, KeyError) as e:
-        print(f"{e}. Aborting.")
+        log.error(f"{e}. Aborting.")
         sys.exit(-1)
 
     # try to autoconvert new models
@@ -80,14 +81,14 @@ def get_model_manager(config: Args) -> ModelManager:
     return model_manager
 
 def report_model_error(opt: Namespace, e: Exception):
-    print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
-    print(
-        "** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models."
+    log.error(f'An error occurred while attempting to initialize the model: "{str(e)}"')
+    log.error(
+        "This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models."
     )
     yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE")
     if yes_to_all:
-        print(
-            "** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE"
+        log.warning(
+            "Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE"
         )
     else:
         response = input(
@@ -96,7 +97,7 @@ def report_model_error(opt: Namespace, e: Exception):
     if response.startswith(("n", "N")):
         return
 
-    print("invokeai-configure is launching....\n")
+    log.info("invokeai-configure is launching....\n")
 
     # Match arguments that were set on the CLI
     # only the arguments accepted by the configuration script are parsed
@@ -1,6 +1,7 @@
 import sys
 import traceback
 import torch
+import invokeai.backend.util.logging as log
 from ...backend.restoration import Restoration
 from ...backend.util import choose_torch_device, CPU_DEVICE, MPS_DEVICE
 
@@ -20,16 +21,16 @@ class RestorationServices:
                         args.gfpgan_model_path
                     )
                 else:
-                    print(">> Face restoration disabled")
+                    log.info("Face restoration disabled")
                 if args.esrgan:
                     esrgan = restoration.load_esrgan(args.esrgan_bg_tile)
                 else:
-                    print(">> Upscaling disabled")
+                    log.info("Upscaling disabled")
             else:
-                print(">> Face restoration and upscaling disabled")
+                log.info("Face restoration and upscaling disabled")
         except (ModuleNotFoundError, ImportError):
             print(traceback.format_exc(), file=sys.stderr)
-            print(">> You may need to install the ESRGAN and/or GFPGAN modules")
+            log.info("You may need to install the ESRGAN and/or GFPGAN modules")
         self.device = torch.device(choose_torch_device())
         self.gfpgan = gfpgan
         self.codeformer = codeformer
@@ -58,15 +59,15 @@ class RestorationServices:
         if self.gfpgan is not None or self.codeformer is not None:
             if facetool == "gfpgan":
                 if self.gfpgan is None:
-                    print(
-                        ">> GFPGAN not found. Face restoration is disabled."
+                    log.info(
+                        "GFPGAN not found. Face restoration is disabled."
                     )
                 else:
                     image = self.gfpgan.process(image, strength, seed)
             if facetool == "codeformer":
                 if self.codeformer is None:
-                    print(
-                        ">> CodeFormer not found. Face restoration is disabled."
+                    log.info(
+                        "CodeFormer not found. Face restoration is disabled."
                     )
                 else:
                     cf_device = (
@@ -80,7 +81,7 @@ class RestorationServices:
                         fidelity=codeformer_fidelity,
                     )
         else:
-            print(">> Face Restoration is disabled.")
+            log.info("Face Restoration is disabled.")
         if upscale is not None:
             if self.esrgan is not None:
                 if len(upscale) < 2:
@@ -93,10 +94,10 @@ class RestorationServices:
                         denoise_str=upscale_denoise_str,
                     )
             else:
-                print(">> ESRGAN is disabled. Image not upscaled.")
+                log.info("ESRGAN is disabled. Image not upscaled.")
         except Exception as e:
-            print(
-                f">> Error running RealESRGAN or GFPGAN. Your image was not upscaled.\n{e}"
+            log.info(
+                f"Error running RealESRGAN or GFPGAN. Your image was not upscaled.\n{e}"
             )
 
         if image_callback is not None:
@@ -1088,7 +1088,7 @@ class Generate:
             image = img
             log.info(f"using provided input image of size {image.width}x{image.height}")
         elif isinstance(img, str):
-            assert os.path.exists(img), f">> {img}: File not found"
+            assert os.path.exists(img), f"{img}: File not found"
 
             image = Image.open(img)
             log.info(
@@ -1,5 +1,6 @@
+# Copyright (c) 2023 Lincoln D. Stein and The InvokeAI Development Team
+
 """invokeai.util.logging
-Copyright 2023 The InvokeAI Development Team
 
 Logging class for InvokeAI that produces console messages that follow
 the conventions established in InvokeAI 1.X through 2.X.
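
As exercised by this commit, the module's surface is just the level-named helpers called on the imported module object. A minimal usage sketch, assuming only the info/warning/error calls that appear in these hunks (messages copied from them):

    import invokeai.backend.util.logging as log

    log.info("Face restoration disabled")                 # routine status, was print(">> ...")
    log.warning("Session error: creating a new session")  # recoverable condition
    log.error("An exception occurred during training. The exception was:")  # failure, was print("** ...")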
@@ -22,6 +22,7 @@ import torch
 from npyscreen import widget
 from omegaconf import OmegaConf
 
+import invokeai.backend.util.logging as log
 from invokeai.backend.globals import Globals, global_config_dir
 
 from ...backend.config.model_install_backend import (
@@ -455,8 +456,8 @@ def main():
     Globals.root = os.path.expanduser(get_root(opt.root) or "")
 
     if not global_config_dir().exists():
-        print(
-            ">> Your InvokeAI root directory is not set up. Calling invokeai-configure."
+        log.info(
+            "Your InvokeAI root directory is not set up. Calling invokeai-configure."
         )
         from invokeai.frontend.install import invokeai_configure
 
@@ -466,18 +467,18 @@ def main():
     try:
         select_and_download_models(opt)
     except AssertionError as e:
-        print(str(e))
+        log.error(e)
         sys.exit(-1)
     except KeyboardInterrupt:
-        print("\nGoodbye! Come back soon.")
+        log.info("Goodbye! Come back soon.")
     except widget.NotEnoughSpaceForWidget as e:
         if str(e).startswith("Height of 1 allocated"):
-            print(
-                "** Insufficient vertical space for the interface. Please make your window taller and try again"
+            log.error(
+                "Insufficient vertical space for the interface. Please make your window taller and try again"
             )
         elif str(e).startswith("addwstr"):
-            print(
-                "** Insufficient horizontal space for the interface. Please make your window wider and try again."
+            log.error(
+                "Insufficient horizontal space for the interface. Please make your window wider and try again."
             )
 
 
@@ -27,6 +27,8 @@ from ...backend.globals import (
     global_models_dir,
     global_set_root,
 )
 
+import invokeai.backend.util.logging as log
+
 from ...backend.model_management import ModelManager
 from ...frontend.install.widgets import FloatTitleSlider
@@ -113,7 +115,7 @@ def merge_diffusion_models_and_commit(
         model_name=merged_model_name, description=f'Merge of models {", ".join(models)}'
     )
     if vae := model_manager.config[models[0]].get("vae", None):
-        print(f">> Using configured VAE assigned to {models[0]}")
+        log.info(f"Using configured VAE assigned to {models[0]}")
         import_args.update(vae=vae)
     model_manager.import_diffuser_model(dump_path, **import_args)
     model_manager.commit(config_file)
@@ -391,10 +393,8 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
             for name in self.model_manager.model_names()
             if self.model_manager.model_info(name).get("format") == "diffusers"
         ]
-        print(model_names)
         return sorted(model_names)
-
 
 class Mergeapp(npyscreen.NPSAppManaged):
     def __init__(self):
         super().__init__()
@@ -414,7 +414,7 @@ def run_gui(args: Namespace):
 
     args = mergeapp.merge_arguments
     merge_diffusion_models_and_commit(**args)
-    print(f'>> Models merged into new model: "{args["merged_model_name"]}".')
+    log.info(f'Models merged into new model: "{args["merged_model_name"]}".')
 
 
 def run_cli(args: Namespace):
@@ -425,8 +425,8 @@ def run_cli(args: Namespace):
 
     if not args.merged_model_name:
         args.merged_model_name = "+".join(args.models)
-        print(
-            f'>> No --merged_model_name provided. Defaulting to "{args.merged_model_name}"'
+        log.info(
+            f'No --merged_model_name provided. Defaulting to "{args.merged_model_name}"'
         )
 
     model_manager = ModelManager(OmegaConf.load(global_config_file()))
@@ -435,7 +435,7 @@ def run_cli(args: Namespace):
     ), f'A model named "{args.merged_model_name}" already exists. Use --clobber to overwrite.'
 
     merge_diffusion_models_and_commit(**vars(args))
-    print(f'>> Models merged into new model: "{args.merged_model_name}".')
+    log.info(f'Models merged into new model: "{args.merged_model_name}".')
 
 
 def main():
@@ -455,17 +455,16 @@ def main():
         run_cli(args)
     except widget.NotEnoughSpaceForWidget as e:
         if str(e).startswith("Height of 1 allocated"):
-            print(
-                "** You need to have at least two diffusers models defined in models.yaml in order to merge"
+            log.error(
+                "You need to have at least two diffusers models defined in models.yaml in order to merge"
             )
         else:
-            print(
-                "** Not enough room for the user interface. Try making this window larger."
+            log.error(
+                "Not enough room for the user interface. Try making this window larger."
             )
         sys.exit(-1)
-    except Exception:
-        print(">> An error occurred:")
-        traceback.print_exc()
+    except Exception as e:
+        log.error(e)
         sys.exit(-1)
     except KeyboardInterrupt:
         sys.exit(-1)
@@ -20,6 +20,7 @@ import npyscreen
 from npyscreen import widget
 from omegaconf import OmegaConf
 
+import invokeai.backend.util.logging as log
 from invokeai.backend.globals import Globals, global_set_root
 
 from ...backend.training import do_textual_inversion_training, parse_args
@@ -368,14 +369,14 @@ def copy_to_embeddings_folder(args: dict):
     dest_dir_name = args["placeholder_token"].strip("<>")
     destination = Path(Globals.root, "embeddings", dest_dir_name)
     os.makedirs(destination, exist_ok=True)
-    print(f">> Training completed. Copying learned_embeds.bin into {str(destination)}")
+    log.info(f"Training completed. Copying learned_embeds.bin into {str(destination)}")
     shutil.copy(source, destination)
     if (
         input("Delete training logs and intermediate checkpoints? [y] ") or "y"
     ).startswith(("y", "Y")):
         shutil.rmtree(Path(args["output_dir"]))
     else:
-        print(f'>> Keeping {args["output_dir"]}')
+        log.info(f'Keeping {args["output_dir"]}')
 
 
 def save_args(args: dict):
@@ -422,10 +423,10 @@ def do_front_end(args: Namespace):
         do_textual_inversion_training(**args)
         copy_to_embeddings_folder(args)
     except Exception as e:
-        print("** An exception occurred during training. The exception was:")
-        print(str(e))
-        print("** DETAILS:")
-        print(traceback.format_exc())
+        log.error("An exception occurred during training. The exception was:")
+        log.error(str(e))
+        log.error("DETAILS:")
+        log.error(traceback.format_exc())
 
 
 def main():
@@ -437,21 +438,21 @@ def main():
         else:
             do_textual_inversion_training(**vars(args))
     except AssertionError as e:
-        print(str(e))
+        log.error(e)
         sys.exit(-1)
     except KeyboardInterrupt:
         pass
     except (widget.NotEnoughSpaceForWidget, Exception) as e:
         if str(e).startswith("Height of 1 allocated"):
-            print(
-                "** You need to have at least one diffusers models defined in models.yaml in order to train"
+            log.error(
+                "You need to have at least one diffusers models defined in models.yaml in order to train"
             )
         elif str(e).startswith("addwstr"):
-            print(
-                "** Not enough window space for the interface. Please make your window larger and try again."
+            log.error(
+                "Not enough window space for the interface. Please make your window larger and try again."
             )
         else:
-            print(f"** An error has occurred: {str(e)}")
+            log.error(e)
         sys.exit(-1)
 
 