# invokeai/app/services/generate_initializer.py
import os
import sys
import traceback
from argparse import Namespace

import invokeai.version
from invokeai.backend import Generate, ModelManager

from ...backend import Globals
# TODO: most of this code should be split into individual services as the Generate.py code is deprecated
def get_generate(args, config) -> Generate:
    """Build and return a fully initialized ``Generate`` object from CLI args.

    Steps: validate the models config file, silence noisy library loggers,
    load optional face-restoration / upscaling modules, normalize paths
    relative to ``Globals.root``, migrate legacy models, construct the
    ``Generate`` object, preload the model, and auto-convert weights.

    Exits the process (via ``sys.exit`` or ``report_model_error``) on fatal
    configuration or I/O errors.
    """
    if not args.conf:
        config_file = os.path.join(Globals.root, "configs", "models.yaml")
        if not os.path.exists(config_file):
            report_model_error(
                args, FileNotFoundError(f"The file {config_file} could not be found.")
            )

    print(f">> {invokeai.version.__app_name__}, version {invokeai.version.__version__}")
    print(f'>> InvokeAI runtime directory is "{Globals.root}"')

    # these two lines prevent a horrible warning message from appearing
    # when the frozen CLIP tokenizer is imported
    import transformers  # type: ignore

    transformers.logging.set_verbosity_error()
    import diffusers

    diffusers.logging.set_verbosity_error()

    # Loading Face Restoration and ESRGAN Modules
    gfpgan, codeformer, esrgan = load_face_restoration(args)

    # normalize the config directory relative to root
    if not os.path.isabs(args.conf):
        args.conf = os.path.normpath(os.path.join(Globals.root, args.conf))

    if args.embeddings:
        if not os.path.isabs(args.embedding_path):
            embedding_path = os.path.normpath(
                os.path.join(Globals.root, args.embedding_path)
            )
        else:
            embedding_path = args.embedding_path
    else:
        embedding_path = None

    # migrate legacy models
    ModelManager.migrate_models()

    # load the infile as a list of lines
    # NOTE(review): `infile` is opened but never used or closed in this
    # function — presumably a leftover from the legacy CLI; confirm callers.
    if args.infile:
        try:
            if os.path.isfile(args.infile):
                infile = open(args.infile, "r", encoding="utf-8")
            elif args.infile == "-":  # stdin
                infile = sys.stdin
            else:
                raise FileNotFoundError(f"{args.infile} not found.")
        except (FileNotFoundError, IOError) as e:
            print(f"{e}. Aborting.")
            sys.exit(-1)

    # creating a Generate object:
    try:
        gen = Generate(
            conf=args.conf,
            model=args.model,
            sampler_name=args.sampler_name,
            embedding_path=embedding_path,
            full_precision=args.full_precision,
            precision=args.precision,
            gfpgan=gfpgan,
            codeformer=codeformer,
            esrgan=esrgan,
            free_gpu_mem=args.free_gpu_mem,
            safety_checker=args.safety_checker,
            max_loaded_models=args.max_loaded_models,
        )
    except (FileNotFoundError, TypeError, AssertionError) as e:
        # bug fix: previously called report_model_error(opt, e) — `opt` is
        # undefined in this scope and raised NameError, masking the real error
        report_model_error(args, e)
    except (IOError, KeyError) as e:
        print(f"{e}. Aborting.")
        sys.exit(-1)

    if args.seamless:
        print(">> changed to seamless tiling mode")

    # preload the model
    try:
        gen.load_model()
    except KeyError:
        # missing model entry is tolerated here; loading is retried later
        pass
    except Exception as e:
        report_model_error(args, e)

    # try to autoconvert new models
    # autoimport new .ckpt files
    if path := args.autoconvert:
        gen.model_manager.autoconvert_weights(
            conf_path=args.conf,
            weights_directory=path,
        )

    return gen
def load_face_restoration(opt):
    """Load the GFPGAN/CodeFormer face-restoration and ESRGAN upscaling modules.

    Returns a ``(gfpgan, codeformer, esrgan)`` triple. Each element is
    ``None`` when the corresponding feature is disabled in ``opt`` or when
    the restoration package cannot be imported (best-effort: the import
    failure is reported, not raised).
    """
    face_fixer, code_former, upscaler = None, None, None
    try:
        if not (opt.restore or opt.esrgan):
            print(">> Face restoration and upscaling disabled")
        else:
            from invokeai.backend.restoration import Restoration

            restorer = Restoration()
            if opt.restore:
                face_fixer, code_former = restorer.load_face_restore_models(
                    opt.gfpgan_model_path
                )
            else:
                print(">> Face restoration disabled")
            if opt.esrgan:
                upscaler = restorer.load_esrgan(opt.esrgan_bg_tile)
            else:
                print(">> Upscaling disabled")
    except (ModuleNotFoundError, ImportError):
        # report and fall through with whatever (if anything) was loaded
        print(traceback.format_exc(), file=sys.stderr)
        print(">> You may need to install the ESRGAN and/or GFPGAN modules")
    return face_fixer, code_former, upscaler
def report_model_error(opt: Namespace, e: Exception):
    """Report a model-initialization failure and offer to run invokeai-configure.

    Honors the INVOKE_MODEL_RECONFIGURE environment variable: when it is set,
    reconfiguration runs without prompting and the variable's contents are
    appended as extra CLI arguments for the configure script. Otherwise the
    user is prompted and may decline, in which case this returns immediately.
    """
    print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
    print(
        "** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models."
    )
    yes_to_all = os.environ.get("INVOKE_MODEL_RECONFIGURE")
    if not yes_to_all:
        response = input(
            "Do you want to run invokeai-configure script to select and/or reinstall models? [y] "
        )
        if response.startswith(("n", "N")):
            return
    else:
        print(
            "** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE"
        )

    print("invokeai-configure is launching....\n")

    # Match arguments that were set on the CLI
    # only the arguments accepted by the configuration script are parsed
    root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
    config = ["--config", opt.conf] if opt.conf is not None else []
    previous_args = sys.argv  # kept for the (disabled) restart logic below
    sys.argv = ["invokeai-configure", *root_dir, *config]
    if yes_to_all is not None:
        sys.argv.extend(yes_to_all.split())

    from invokeai.frontend.install import invokeai_configure

    invokeai_configure()
    # TODO: Figure out how to restart
    # print('** InvokeAI will now restart')
    # sys.argv = previous_args
    # main() # would rather do a os.exec(), but doesn't exist?
    # sys.exit(0)
# Temporary initializer for Generate until we migrate off of it
def old_get_generate(args, config) -> Generate:
    """Legacy construction of a ``Generate`` object (pre-services path).

    Sets ``Globals.root`` from args/environment (module-level side effect),
    loads optional restoration modules, normalizes config/embedding paths,
    and builds the ``Generate`` object. Exits the process on fatal errors.
    """
    # TODO: Remove the need for globals
    from invokeai.backend.globals import Globals

    # alert - setting globals here
    Globals.root = os.path.expanduser(
        args.root_dir or os.environ.get("INVOKEAI_ROOT") or os.path.abspath(".")
    )
    Globals.try_patchmatch = args.patchmatch

    print(f'>> InvokeAI runtime directory is "{Globals.root}"')

    # these two lines prevent a horrible warning message from appearing
    # when the frozen CLIP tokenizer is imported
    import transformers

    transformers.logging.set_verbosity_error()

    # Loading Face Restoration and ESRGAN Modules
    gfpgan, codeformer, esrgan = None, None, None
    try:
        if config.restore or config.esrgan:
            from ldm.invoke.restoration import Restoration

            restoration = Restoration()
            if config.restore:
                gfpgan, codeformer = restoration.load_face_restore_models(
                    config.gfpgan_model_path
                )
            else:
                print(">> Face restoration disabled")
            if config.esrgan:
                esrgan = restoration.load_esrgan(config.esrgan_bg_tile)
            else:
                print(">> Upscaling disabled")
        else:
            print(">> Face restoration and upscaling disabled")
    except (ModuleNotFoundError, ImportError):
        print(traceback.format_exc(), file=sys.stderr)
        print(">> You may need to install the ESRGAN and/or GFPGAN modules")

    # normalize the config directory relative to root
    if not os.path.isabs(config.conf):
        config.conf = os.path.normpath(os.path.join(Globals.root, config.conf))

    if config.embeddings:
        if not os.path.isabs(config.embedding_path):
            embedding_path = os.path.normpath(
                os.path.join(Globals.root, config.embedding_path)
            )
        else:
            # bug fix: this branch was missing, leaving embedding_path
            # unbound (UnboundLocalError) when the configured path was
            # already absolute; mirrors get_generate() above
            embedding_path = config.embedding_path
    else:
        embedding_path = None

    # TODO: lazy-initialize this by wrapping it
    try:
        generate = Generate(
            conf=config.conf,
            model=config.model,
            sampler_name=config.sampler_name,
            embedding_path=embedding_path,
            full_precision=config.full_precision,
            precision=config.precision,
            gfpgan=gfpgan,
            codeformer=codeformer,
            esrgan=esrgan,
            free_gpu_mem=config.free_gpu_mem,
            safety_checker=config.safety_checker,
            max_loaded_models=config.max_loaded_models,
        )
    except (FileNotFoundError, TypeError, AssertionError):
        # emergency_model_reconfigure() # TODO?
        sys.exit(-1)
    except (IOError, KeyError) as e:
        print(f"{e}. Aborting.")
        sys.exit(-1)

    generate.free_gpu_mem = config.free_gpu_mem

    return generate