resolve some undefined symbols in model_cache

Lincoln Stein
2023-05-18 14:31:47 -04:00
223 changed files with 3236 additions and 8781 deletions

View File

@ -1,7 +1,6 @@
"""
Initialization file for invokeai.backend
"""
from .generate import Generate
from .generator import (
InvokeAIGeneratorBasicParams,
InvokeAIGenerator,
@ -12,5 +11,3 @@ from .generator import (
)
from .model_management import ModelManager, ModelCache, SDModelType, SDModelInfo
from .safety_checker import SafetyChecker
from .args import Args
from .globals import Globals

File diff suppressed because it is too large

View File

@ -19,10 +19,10 @@ import warnings
from argparse import Namespace
from pathlib import Path
from shutil import get_terminal_size
from typing import get_type_hints
from urllib import request
import npyscreen
import torch
import transformers
from diffusers import AutoencoderKL
from huggingface_hub import HfFolder
@ -38,34 +38,40 @@ from transformers import (
import invokeai.configs as configs
from ...frontend.install.model_install import addModelsForm, process_and_execute
from ...frontend.install.widgets import (
from invokeai.frontend.install.model_install import addModelsForm, process_and_execute
from invokeai.frontend.install.widgets import (
CenteredButtonPress,
IntTitleSlider,
set_min_terminal_size,
)
from ..args import PRECISION_CHOICES, Args
from ..globals import Globals, global_cache_dir, global_config_dir, global_config_file
from .model_install_backend import (
from invokeai.backend.config.legacy_arg_parsing import legacy_parser
from invokeai.backend.config.model_install_backend import (
default_dataset,
download_from_hf,
hf_download_with_resume,
recommended_datasets,
)
from invokeai.app.services.config import (
get_invokeai_config,
InvokeAIAppConfig,
)
warnings.filterwarnings("ignore")
transformers.logging.set_verbosity_error()
# --------------------------globals-----------------------
config = get_invokeai_config()
Model_dir = "models"
Weights_dir = "ldm/stable-diffusion-v1/"
# the initial "configs" dir is now bundled in the `invokeai.configs` package
Dataset_path = Path(configs.__path__[0]) / "INITIAL_MODELS.yaml"
Default_config_file = Path(global_config_dir()) / "models.yaml"
SD_Configs = Path(global_config_dir()) / "stable-diffusion"
Default_config_file = config.model_conf_path
SD_Configs = config.legacy_conf_path
Datasets = OmegaConf.load(Dataset_path)
@ -73,17 +79,12 @@ Datasets = OmegaConf.load(Dataset_path)
MIN_COLS = 135
MIN_LINES = 45
PRECISION_CHOICES = ['auto','float16','float32','autocast']
INIT_FILE_PREAMBLE = """# InvokeAI initialization file
# This is the InvokeAI initialization file, which contains command-line default values.
# Feel free to edit. If anything goes wrong, you can re-initialize this file by deleting
# or renaming it and then running invokeai-configure again.
# Place frequently-used startup commands here, one or more per line.
# Examples:
# --outdir=D:\data\images
# --no-nsfw_checker
# --web --host=0.0.0.0
# --steps=20
# -Ak_euler_a -C10.0
"""
@ -96,14 +97,13 @@ If you installed manually from source or with 'pip install': activate the virtua
then run one of the following commands to start InvokeAI.
Web UI:
invokeai --web # (connect to http://localhost:9090)
invokeai --web --host 0.0.0.0 # (connect to http://your-lan-ip:9090 from another computer on the local network)
invokeai-web
Command-line interface:
Command-line client:
invokeai
If you installed using an installation script, run:
{Globals.root}/invoke.{"bat" if sys.platform == "win32" else "sh"}
{config.root}/invoke.{"bat" if sys.platform == "win32" else "sh"}
Add the '--help' argument to see all of the command-line switches available for use.
"""
@ -216,11 +216,11 @@ def download_realesrgan():
wdn_model_url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth"
model_dest = os.path.join(
Globals.root, "models/realesrgan/realesr-general-x4v3.pth"
config.root, "models/realesrgan/realesr-general-x4v3.pth"
)
wdn_model_dest = os.path.join(
Globals.root, "models/realesrgan/realesr-general-wdn-x4v3.pth"
config.root, "models/realesrgan/realesr-general-wdn-x4v3.pth"
)
download_with_progress_bar(model_url, model_dest, "RealESRGAN")
@ -243,7 +243,7 @@ def download_gfpgan():
"./models/gfpgan/weights/parsing_parsenet.pth",
],
):
model_url, model_dest = model[0], os.path.join(Globals.root, model[1])
model_url, model_dest = model[0], os.path.join(config.root, model[1])
download_with_progress_bar(model_url, model_dest, "GFPGAN weights")
@ -253,7 +253,7 @@ def download_codeformer():
model_url = (
"https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth"
)
model_dest = os.path.join(Globals.root, "models/codeformer/codeformer.pth")
model_dest = os.path.join(config.root, "models/codeformer/codeformer.pth")
download_with_progress_bar(model_url, model_dest, "CodeFormer")
@ -295,7 +295,7 @@ def download_vaes():
# first the diffusers version
repo_id = "stabilityai/sd-vae-ft-mse"
args = dict(
cache_dir=global_cache_dir("hub"),
cache_dir=config.cache_dir,
)
if not AutoencoderKL.from_pretrained(repo_id, **args):
raise Exception(f"download of {repo_id} failed")
@ -306,7 +306,7 @@ def download_vaes():
if not hf_download_with_resume(
repo_id=repo_id,
model_name=model_name,
model_dir=str(Globals.root / Model_dir / Weights_dir),
model_dir=str(config.root / Model_dir / Weights_dir),
):
raise Exception(f"download of {model_name} failed")
except Exception as e:
@ -321,8 +321,7 @@ def get_root(root: str = None) -> str:
elif os.environ.get("INVOKEAI_ROOT"):
return os.environ.get("INVOKEAI_ROOT")
else:
return Globals.root
return config.root
# -------------------------------------
class editOptsForm(npyscreen.FormMultiPage):
@ -332,7 +331,7 @@ class editOptsForm(npyscreen.FormMultiPage):
def create(self):
program_opts = self.parentApp.program_opts
old_opts = self.parentApp.invokeai_opts
first_time = not (Globals.root / Globals.initfile).exists()
first_time = not (config.root / 'invokeai.yaml').exists()
access_token = HfFolder.get_token()
window_width, window_height = get_terminal_size()
for i in [
@ -366,7 +365,7 @@ class editOptsForm(npyscreen.FormMultiPage):
self.outdir = self.add_widget_intelligent(
npyscreen.TitleFilename,
name="(<tab> autocompletes, ctrl-N advances):",
value=old_opts.outdir or str(default_output_dir()),
value=str(old_opts.outdir or default_output_dir()),
select_dir=True,
must_exist=False,
use_two_lines=False,
@ -381,17 +380,17 @@ class editOptsForm(npyscreen.FormMultiPage):
editable=False,
color="CONTROL",
)
self.safety_checker = self.add_widget_intelligent(
self.nsfw_checker = self.add_widget_intelligent(
npyscreen.Checkbox,
name="NSFW checker",
value=old_opts.safety_checker,
value=old_opts.nsfw_checker,
relx=5,
scroll_exit=True,
)
self.nextrely += 1
for i in [
"If you have an account at HuggingFace you may paste your access token here",
'to allow InvokeAI to download styles & subjects from the "Concept Library".',
"If you have an account at HuggingFace you may optionally paste your access token here",
'to allow InvokeAI to download restricted styles & subjects from the "Concept Library".',
"See https://huggingface.co/settings/tokens",
]:
self.add_widget_intelligent(
@ -435,17 +434,10 @@ class editOptsForm(npyscreen.FormMultiPage):
relx=5,
scroll_exit=True,
)
self.xformers = self.add_widget_intelligent(
self.xformers_enabled = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Enable xformers support if available",
value=old_opts.xformers,
relx=5,
scroll_exit=True,
)
self.ckpt_convert = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Load legacy checkpoint models into memory as diffusers models",
value=old_opts.ckpt_convert,
value=old_opts.xformers_enabled,
relx=5,
scroll_exit=True,
)
@ -480,19 +472,30 @@ class editOptsForm(npyscreen.FormMultiPage):
self.nextrely += 1
self.add_widget_intelligent(
npyscreen.FixedText,
value="Directory containing embedding/textual inversion files:",
value="Directories containing textual inversion and LoRA models (<tab> autocompletes, ctrl-N advances):",
editable=False,
color="CONTROL",
)
self.embedding_path = self.add_widget_intelligent(
self.embedding_dir = self.add_widget_intelligent(
npyscreen.TitleFilename,
name="(<tab> autocompletes, ctrl-N advances):",
name=" Textual Inversion Embeddings:",
value=str(default_embedding_dir()),
select_dir=True,
must_exist=False,
use_two_lines=False,
labelColor="GOOD",
begin_entry_at=40,
begin_entry_at=32,
scroll_exit=True,
)
self.lora_dir = self.add_widget_intelligent(
npyscreen.TitleFilename,
name=" LoRA and LyCORIS:",
value=str(default_lora_dir()),
select_dir=True,
must_exist=False,
use_two_lines=False,
labelColor="GOOD",
begin_entry_at=32,
scroll_exit=True,
)
self.nextrely += 1
@ -559,9 +562,9 @@ class editOptsForm(npyscreen.FormMultiPage):
bad_fields.append(
f"The output directory does not seem to be valid. Please check that {str(Path(opt.outdir).parent)} is an existing directory."
)
if not Path(opt.embedding_path).parent.exists():
if not Path(opt.embedding_dir).parent.exists():
bad_fields.append(
f"The embedding directory does not seem to be valid. Please check that {str(Path(opt.embedding_path).parent)} is an existing directory."
f"The embedding directory does not seem to be valid. Please check that {str(Path(opt.embedding_dir).parent)} is an existing directory."
)
if len(bad_fields) > 0:
message = "The following problems were detected and must be corrected:\n"
@ -576,20 +579,23 @@ class editOptsForm(npyscreen.FormMultiPage):
new_opts = Namespace()
for attr in [
"outdir",
"safety_checker",
"free_gpu_mem",
"max_loaded_models",
"xformers",
"always_use_cpu",
"embedding_path",
"ckpt_convert",
"outdir",
"nsfw_checker",
"free_gpu_mem",
"max_loaded_models",
"xformers_enabled",
"always_use_cpu",
"embedding_dir",
"lora_dir",
]:
setattr(new_opts, attr, getattr(self, attr).value)
new_opts.hf_token = self.hf_token.value
new_opts.license_acceptance = self.license_acceptance.value
new_opts.precision = PRECISION_CHOICES[self.precision.value[0]]
# widget library workaround to make max_loaded_models an int rather than a float
new_opts.max_loaded_models = int(new_opts.max_loaded_models)
return new_opts
@ -628,15 +634,14 @@ def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Nam
def default_startup_options(init_file: Path) -> Namespace:
opts = Args().parse_args([])
opts = InvokeAIAppConfig(argv=[])
outdir = Path(opts.outdir)
if not outdir.is_absolute():
opts.outdir = str(Globals.root / opts.outdir)
opts.outdir = str(config.root / opts.outdir)
if not init_file.exists():
opts.safety_checker = True
opts.nsfw_checker = True
return opts
def default_user_selections(program_opts: Namespace) -> Namespace:
return Namespace(
starter_models=default_dataset()
@ -690,70 +695,61 @@ def run_console_ui(
# -------------------------------------
def write_opts(opts: Namespace, init_file: Path):
"""
Update the invokeai.init file with values from opts Namespace
Update the invokeai.yaml file with values from current settings.
"""
# touch file if it doesn't exist
if not init_file.exists():
with open(init_file, "w") as f:
f.write(INIT_FILE_PREAMBLE)
# We want to write in the changed arguments without clobbering
# any other initialization values the user has entered. There is
# no good way to do this because of the one-way nature of
# argparse: i.e. --outdir could be --outdir, --out, or -o
# initfile needs to be replaced with a fully structured format
# such as yaml; this is a hack that will work much of the time
args_to_skip = re.compile(
"^--?(o|out|no-xformer|xformer|no-ckpt|ckpt|free|no-nsfw|nsfw|prec|max_load|embed|always|ckpt|free_gpu)"
)
# fix windows paths
opts.outdir = opts.outdir.replace("\\", "/")
opts.embedding_path = opts.embedding_path.replace("\\", "/")
new_file = f"{init_file}.new"
try:
lines = [x.strip() for x in open(init_file, "r").readlines()]
with open(new_file, "w") as out_file:
for line in lines:
if len(line) > 0 and not args_to_skip.match(line):
out_file.write(line + "\n")
out_file.write(
f"""
--outdir={opts.outdir}
--embedding_path={opts.embedding_path}
--precision={opts.precision}
--max_loaded_models={int(opts.max_loaded_models)}
--{'no-' if not opts.safety_checker else ''}nsfw_checker
--{'no-' if not opts.xformers else ''}xformers
--{'no-' if not opts.ckpt_convert else ''}ckpt_convert
{'--free_gpu_mem' if opts.free_gpu_mem else ''}
{'--always_use_cpu' if opts.always_use_cpu else ''}
"""
)
except OSError as e:
print(f"** An error occurred while writing the init file: {str(e)}")
os.replace(new_file, init_file)
if opts.hf_token:
HfLogin(opts.hf_token)
# this will load current settings
config = InvokeAIAppConfig()
for key,value in opts.__dict__.items():
if hasattr(config,key):
setattr(config,key,value)
with open(init_file,'w', encoding='utf-8') as file:
file.write(config.to_yaml())
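A minimal usage sketch of the new write_opts flow (the Namespace field values are illustrative; Namespace and Path are already imported at the top of this module):

# hypothetical example: persist settings gathered by the console UI
opts = Namespace(outdir='/home/user/invokeai/outputs', nsfw_checker=True, precision='float16')
write_opts(opts, Path('/home/user/invokeai/invokeai.yaml'))
# write_opts copies every Namespace attribute that also exists on a fresh
# InvokeAIAppConfig onto it, then serializes the result with to_yaml()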
# -------------------------------------
def default_output_dir() -> Path:
return Globals.root / "outputs"
return config.root / "outputs"
# -------------------------------------
def default_embedding_dir() -> Path:
return Globals.root / "embeddings"
return config.root / "embeddings"
# -------------------------------------
def default_lora_dir() -> Path:
return config.root / "loras"
# -------------------------------------
def write_default_options(program_opts: Namespace, initfile: Path):
opt = default_startup_options(initfile)
opt.hf_token = HfFolder.get_token()
write_opts(opt, initfile)
# -------------------------------------
# Here we bring in
# the legacy Args object in order to parse
# the old init file and write out the new
# yaml format.
def migrate_init_file(legacy_format:Path):
old = legacy_parser.parse_args([f'@{str(legacy_format)}'])
new = InvokeAIAppConfig(conf={})
fields = list(get_type_hints(InvokeAIAppConfig).keys())
for attr in fields:
if hasattr(old,attr):
setattr(new,attr,getattr(old,attr))
# a few places where the field names have changed and we have to
# manually add in the new names/values
new.nsfw_checker = old.safety_checker
new.xformers_enabled = old.xformers
new.conf_path = old.conf
new.embedding_dir = old.embedding_path
invokeai_yaml = legacy_format.parent / 'invokeai.yaml'
with open(invokeai_yaml,"w", encoding="utf-8") as outfile:
outfile.write(new.to_yaml())
legacy_format.replace(legacy_format.parent / 'invokeai.init.old')
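A sketch of the migration call, mirroring the invocation in main() further below (the path is illustrative):

# hypothetical: convert a legacy init file found in the runtime root
migrate_init_file(Path('/home/user/invokeai/invokeai.init'))
# afterwards invokeai.yaml sits beside it, and the legacy file is
# renamed to invokeai.init.old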
# -------------------------------------
def main():
@ -810,7 +806,8 @@ def main():
opt = parser.parse_args()
# setting a global here
Globals.root = Path(os.path.expanduser(get_root(opt.root) or ""))
global config
config.root = Path(os.path.expanduser(get_root(opt.root) or ""))
errors = set()
@ -818,19 +815,26 @@ def main():
models_to_download = default_user_selections(opt)
# We check to see if the runtime directory is correctly initialized.
init_file = Path(Globals.root, Globals.initfile)
if not init_file.exists() or not global_config_file().exists():
initialize_rootdir(Globals.root, opt.yes_to_all)
old_init_file = Path(config.root, 'invokeai.init')
new_init_file = Path(config.root, 'invokeai.yaml')
if old_init_file.exists() and not new_init_file.exists():
print('** Migrating invokeai.init to invokeai.yaml')
migrate_init_file(old_init_file)
config = get_invokeai_config() # reread defaults
if not config.model_conf_path.exists():
initialize_rootdir(config.root, opt.yes_to_all)
if opt.yes_to_all:
write_default_options(opt, init_file)
write_default_options(opt, new_init_file)
init_options = Namespace(
precision="float32" if opt.full_precision else "float16"
)
else:
init_options, models_to_download = run_console_ui(opt, init_file)
init_options, models_to_download = run_console_ui(opt, new_init_file)
if init_options:
write_opts(init_options, init_file)
write_opts(init_options, new_init_file)
else:
print(
'\n** CANCELLED AT USER\'S REQUEST. USE THE "invoke.sh" LAUNCHER TO RUN LATER **\n'

View File

@ -0,0 +1,390 @@
# Copyright 2023 Lincoln D. Stein and the InvokeAI Team
import argparse
import shlex
from argparse import ArgumentParser
SAMPLER_CHOICES = [
"ddim",
"ddpm",
"deis",
"lms",
"pndm",
"heun",
"heun_k",
"euler",
"euler_k",
"euler_a",
"kdpm_2",
"kdpm_2_a",
"dpmpp_2s",
"dpmpp_2m",
"dpmpp_2m_k",
"unipc",
]
PRECISION_CHOICES = [
"auto",
"float32",
"autocast",
"float16",
]
class FileArgumentParser(ArgumentParser):
"""
Supports reading defaults from an init file.
"""
def convert_arg_line_to_args(self, arg_line):
return shlex.split(arg_line, comments=True)
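Because the parser below sets fromfile_prefix_chars='@', the legacy init file can be fed to it directly; a short sketch (file contents are illustrative):

# invokeai.init might contain lines such as:
#   --outdir=/home/user/invokeai/outputs
#   --no-nsfw_checker   # shlex.split(..., comments=True) strips this comment
opts = legacy_parser.parse_args(['@/home/user/invokeai/invokeai.init'])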
legacy_parser = FileArgumentParser(
description=
"""
Generate images using Stable Diffusion.
Use --web to launch the web interface.
Use --from_file to load prompts from a file path or standard input ("-").
Otherwise you will be dropped into an interactive command prompt (type -h for help.)
Other command-line arguments are defaults that can usually be overridden
at the command prompt.
""",
fromfile_prefix_chars='@',
)
general_group = legacy_parser.add_argument_group('General')
model_group = legacy_parser.add_argument_group('Model selection')
file_group = legacy_parser.add_argument_group('Input/output')
web_server_group = legacy_parser.add_argument_group('Web server')
render_group = legacy_parser.add_argument_group('Rendering')
postprocessing_group = legacy_parser.add_argument_group('Postprocessing')
deprecated_group = legacy_parser.add_argument_group('Deprecated options')
deprecated_group.add_argument('--laion400m')
deprecated_group.add_argument('--weights') # deprecated
general_group.add_argument(
'--version','-V',
action='store_true',
help='Print InvokeAI version number'
)
model_group.add_argument(
'--root_dir',
default=None,
help='Path to directory containing "models", "outputs" and "configs". If not present will read from environment variable INVOKEAI_ROOT. Defaults to ~/invokeai.',
)
model_group.add_argument(
'--config',
'-c',
'-config',
dest='conf',
default='./configs/models.yaml',
help='Path to configuration file for alternate models.',
)
model_group.add_argument(
'--model',
help='Indicates which diffusion model to load (defaults to "default" stanza in configs/models.yaml)',
)
model_group.add_argument(
'--weight_dirs',
nargs='+',
type=str,
help='List of one or more directories that will be auto-scanned for new model weights to import',
)
model_group.add_argument(
'--png_compression','-z',
type=int,
default=6,
choices=range(0,9),
dest='png_compression',
help='level of PNG compression, from 0 (none) to 9 (maximum). Default is 6.'
)
model_group.add_argument(
'-F',
'--full_precision',
dest='full_precision',
action='store_true',
help='Deprecated way to set --precision=float32',
)
model_group.add_argument(
'--max_loaded_models',
dest='max_loaded_models',
type=int,
default=2,
help='Maximum number of models to keep in memory for fast switching, including the one in GPU',
)
model_group.add_argument(
'--free_gpu_mem',
dest='free_gpu_mem',
action='store_true',
help='Force free gpu memory before final decoding',
)
model_group.add_argument(
'--sequential_guidance',
dest='sequential_guidance',
action='store_true',
help="Calculate guidance in serial instead of in parallel, lowering memory requirement "
"at the expense of speed",
)
model_group.add_argument(
'--xformers',
action=argparse.BooleanOptionalAction,
default=True,
help='Enable/disable xformers support (default enabled if installed)',
)
model_group.add_argument(
"--always_use_cpu",
dest="always_use_cpu",
action="store_true",
help="Force use of CPU even if GPU is available"
)
model_group.add_argument(
'--precision',
dest='precision',
type=str,
choices=PRECISION_CHOICES,
metavar='PRECISION',
help=f'Set model precision. Defaults to auto selected based on device. Options: {", ".join(PRECISION_CHOICES)}',
default='auto',
)
model_group.add_argument(
'--ckpt_convert',
action=argparse.BooleanOptionalAction,
dest='ckpt_convert',
default=True,
help='Deprecated option. Legacy ckpt files are now always converted to diffusers when loaded.'
)
model_group.add_argument(
'--internet',
action=argparse.BooleanOptionalAction,
dest='internet_available',
default=True,
help='Indicate whether internet is available for just-in-time model downloading (default: probe automatically).',
)
model_group.add_argument(
'--nsfw_checker',
'--safety_checker',
action=argparse.BooleanOptionalAction,
dest='safety_checker',
default=False,
help='Check for and blur potentially NSFW images. Use --no-nsfw_checker to disable.',
)
model_group.add_argument(
'--autoimport',
default=None,
type=str,
help='Check the indicated directory for .ckpt/.safetensors weights files at startup and import directly',
)
model_group.add_argument(
'--autoconvert',
default=None,
type=str,
help='Check the indicated directory for .ckpt/.safetensors weights files at startup and import as optimized diffuser models',
)
model_group.add_argument(
'--patchmatch',
action=argparse.BooleanOptionalAction,
default=True,
help='Load the patchmatch extension for outpainting. Use --no-patchmatch to disable.',
)
file_group.add_argument(
'--from_file',
dest='infile',
type=str,
help='If specified, load prompts from this file',
)
file_group.add_argument(
'--outdir',
'-o',
type=str,
help='Directory to save generated images and a log of prompts and seeds. Default: ROOTDIR/outputs',
default='outputs',
)
file_group.add_argument(
'--prompt_as_dir',
'-p',
action='store_true',
help='Place images in subdirectories named after the prompt.',
)
render_group.add_argument(
'--fnformat',
default='{prefix}.{seed}.png',
type=str,
help='Overwrite the filename format. You can use any argument as wildcard enclosed in curly braces. Default is {prefix}.{seed}.png',
)
render_group.add_argument(
'-s',
'--steps',
type=int,
default=50,
help='Number of steps'
)
render_group.add_argument(
'-W',
'--width',
type=int,
help='Image width, multiple of 64',
)
render_group.add_argument(
'-H',
'--height',
type=int,
help='Image height, multiple of 64',
)
render_group.add_argument(
'-C',
'--cfg_scale',
default=7.5,
type=float,
help='Classifier free guidance (CFG) scale - higher numbers cause generator to "try" harder.',
)
render_group.add_argument(
'--sampler',
'-A',
'-m',
dest='sampler_name',
type=str,
choices=SAMPLER_CHOICES,
metavar='SAMPLER_NAME',
help=f'Set the default sampler. Supported samplers: {", ".join(SAMPLER_CHOICES)}',
default='k_lms',
)
render_group.add_argument(
'--log_tokenization',
'-t',
action='store_true',
help='shows how the prompt is split into tokens'
)
render_group.add_argument(
'-f',
'--strength',
type=float,
help='img2img strength for noising/unnoising. 0.0 preserves image exactly, 1.0 replaces it completely',
)
render_group.add_argument(
'-T',
'-fit',
'--fit',
action=argparse.BooleanOptionalAction,
help='If specified, will resize the input image to fit within the dimensions of width x height (512x512 default)',
)
render_group.add_argument(
'--grid',
'-g',
action=argparse.BooleanOptionalAction,
help='generate a grid'
)
render_group.add_argument(
'--embedding_directory',
'--embedding_path',
dest='embedding_path',
default='embeddings',
type=str,
help='Path to a directory containing .bin and/or .pt files, or a single .bin/.pt file. You may use subdirectories. (default is ROOTDIR/embeddings)'
)
render_group.add_argument(
'--lora_directory',
dest='lora_path',
default='loras',
type=str,
help='Path to a directory containing LoRA files; subdirectories are not supported. (default is ROOTDIR/loras)'
)
render_group.add_argument(
'--embeddings',
action=argparse.BooleanOptionalAction,
default=True,
help='Enable embedding directory (default). Use --no-embeddings to disable.',
)
render_group.add_argument(
'--enable_image_debugging',
action='store_true',
help='Generates debugging image to display'
)
render_group.add_argument(
'--karras_max',
type=int,
default=None,
help="control the point at which the K* samplers will shift from using the Karras noise schedule (good for low step counts) to the LatentDiffusion noise schedule (good for high step counts). Set to 0 to use LatentDiffusion for all step values, and to a high value (e.g. 1000) to use Karras for all step values. [29]."
)
# Restoration related args
postprocessing_group.add_argument(
'--no_restore',
dest='restore',
action='store_false',
help='Disable face restoration with GFPGAN or codeformer',
)
postprocessing_group.add_argument(
'--no_upscale',
dest='esrgan',
action='store_false',
help='Disable upscaling with ESRGAN',
)
postprocessing_group.add_argument(
'--esrgan_bg_tile',
type=int,
default=400,
help='Tile size for background sampler, 0 for no tile during testing. Default: 400.',
)
postprocessing_group.add_argument(
'--esrgan_denoise_str',
type=float,
default=0.75,
help='esrgan denoise str. 0 is no denoise, 1 is max denoise. Default: 0.75',
)
postprocessing_group.add_argument(
'--gfpgan_model_path',
type=str,
default='./models/gfpgan/GFPGANv1.4.pth',
help='Indicates the path to the GFPGAN model',
)
web_server_group.add_argument(
'--web',
dest='web',
action='store_true',
help='Start in web server mode.',
)
web_server_group.add_argument(
'--web_develop',
dest='web_develop',
action='store_true',
help='Start in web server development mode.',
)
web_server_group.add_argument(
"--web_verbose",
action="store_true",
help="Enables verbose logging",
)
web_server_group.add_argument(
"--cors",
nargs="*",
type=str,
help="Additional allowed origins, comma-separated",
)
web_server_group.add_argument(
'--host',
type=str,
default='127.0.0.1',
help='Web server: Host or IP to listen on. Set to 0.0.0.0 to accept traffic from other devices on your network.'
)
web_server_group.add_argument(
'--port',
type=int,
default=9090,
help='Web server: Port to listen on'
)
web_server_group.add_argument(
'--certfile',
type=str,
default=None,
help='Web server: Path to certificate file to use for SSL. Use together with --keyfile'
)
web_server_group.add_argument(
'--keyfile',
type=str,
default=None,
help='Web server: Path to private key file to use for SSL. Use together with --certfile'
)
web_server_group.add_argument(
'--gui',
dest='gui',
action='store_true',
help='Start InvokeAI GUI',
)

View File

@ -19,13 +19,15 @@ from tqdm import tqdm
import invokeai.configs as configs
from ..globals import Globals, global_cache_dir, global_config_dir
from invokeai.app.services.config import get_invokeai_config
from ..model_management import ModelManager
from ..stable_diffusion import StableDiffusionGeneratorPipeline
warnings.filterwarnings("ignore")
# --------------------------globals-----------------------
config = get_invokeai_config()
Model_dir = "models"
Weights_dir = "ldm/stable-diffusion-v1/"
@ -47,12 +49,11 @@ Config_preamble = """
def default_config_file():
return Path(global_config_dir()) / "models.yaml"
return config.model_conf_path
def sd_configs():
return Path(global_config_dir()) / "stable-diffusion"
return config.legacy_conf_path
def initial_models():
global Datasets
@ -121,8 +122,9 @@ def install_requested_models(
if scan_at_startup and scan_directory.is_dir():
argument = "--autoconvert"
initfile = Path(Globals.root, Globals.initfile)
replacement = Path(Globals.root, f"{Globals.initfile}.new")
print('** The global initfile is no longer supported; rewrite to support new yaml format **')
initfile = Path(config.root, 'invokeai.init')
replacement = Path(config.root, "invokeai.init.new")
directory = str(scan_directory).replace("\\", "/")
with open(initfile, "r") as input:
with open(replacement, "w") as output:
@ -150,7 +152,7 @@ def get_root(root: str = None) -> str:
elif os.environ.get("INVOKEAI_ROOT"):
return os.environ.get("INVOKEAI_ROOT")
else:
return Globals.root
return config.root
# ---------------------------------------------
@ -183,7 +185,7 @@ def all_datasets() -> dict:
# look for legacy model.ckpt in models directory and offer to
# normalize its name
def migrate_models_ckpt():
model_path = os.path.join(Globals.root, Model_dir, Weights_dir)
model_path = os.path.join(config.root, Model_dir, Weights_dir)
if not os.path.exists(os.path.join(model_path, "model.ckpt")):
return
new_name = initial_models()["stable-diffusion-1.4"]["file"]
@ -228,7 +230,7 @@ def _download_repo_or_file(
def _download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path:
repo_id = mconfig["repo_id"]
filename = mconfig["file"]
cache_dir = os.path.join(Globals.root, Model_dir, Weights_dir)
cache_dir = os.path.join(config.root, Model_dir, Weights_dir)
return hf_download_with_resume(
repo_id=repo_id,
model_dir=cache_dir,
@ -239,9 +241,9 @@ def _download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path:
# ---------------------------------------------
def download_from_hf(
model_class: object, model_name: str, cache_subdir: Path = Path("hub"), **kwargs
model_class: object, model_name: str, **kwargs
):
path = global_cache_dir(cache_subdir)
path = config.cache_dir
model = model_class.from_pretrained(
model_name,
cache_dir=path,
@ -417,7 +419,7 @@ def new_config_file_contents(
stanza["height"] = mod["height"]
if "file" in mod:
stanza["weights"] = os.path.relpath(
successfully_downloaded[model], start=Globals.root
successfully_downloaded[model], start=config.root
)
stanza["config"] = os.path.normpath(
os.path.join(sd_configs(), mod["config"])
@ -456,7 +458,7 @@ def delete_weights(model_name: str, conf_stanza: dict):
weights = Path(weights)
if not weights.is_absolute():
weights = Path(Globals.root) / weights
weights = Path(config.root) / weights
try:
weights.unlink()
except OSError as e:

File diff suppressed because it is too large

View File

@ -1,126 +0,0 @@
"""
invokeai.backend.globals defines a small number of global variables that would
otherwise have to be passed through long and complex call chains.
It defines a Namespace object named "Globals" that contains
the attributes:
- root - the root directory under which "models" and "outputs" can be found
- initfile - path to the initialization file
- try_patchmatch - option to globally disable loading of 'patchmatch' module
- always_use_cpu - force use of CPU even if GPU is available
"""
import os
import os.path as osp
from argparse import Namespace
from pathlib import Path
from typing import Union
Globals = Namespace()
# Where to look for the initialization file and other key components
Globals.initfile = "invokeai.init"
Globals.models_file = "models.yaml"
Globals.models_dir = "models"
Globals.config_dir = "configs"
Globals.autoscan_dir = "weights"
Globals.converted_ckpts_dir = "converted_ckpts"
# Set the default root directory. This can be overwritten by explicitly
# passing the `--root <directory>` argument on the command line.
# logic is:
# 1) use INVOKEAI_ROOT environment variable (no check for this being a valid directory)
# 2) use VIRTUAL_ENV environment variable, with a check for initfile being there
# 3) use ~/invokeai
if os.environ.get("INVOKEAI_ROOT"):
Globals.root = osp.abspath(os.environ.get("INVOKEAI_ROOT"))
elif (
os.environ.get("VIRTUAL_ENV")
and Path(os.environ.get("VIRTUAL_ENV"), "..", Globals.initfile).exists()
):
Globals.root = osp.abspath(osp.join(os.environ.get("VIRTUAL_ENV"), ".."))
else:
Globals.root = osp.abspath(osp.expanduser("~/invokeai"))
# Try loading patchmatch
Globals.try_patchmatch = True
# Use CPU even if GPU is available (main use case is for debugging MPS issues)
Globals.always_use_cpu = False
# Whether the internet is reachable for dynamic downloads
# The CLI will test connectivity at startup time.
Globals.internet_available = True
# Whether to disable xformers
Globals.disable_xformers = False
# Low-memory tradeoff for guidance calculations.
Globals.sequential_guidance = False
# whether we are forcing full precision
Globals.full_precision = False
# whether we should convert ckpt files into diffusers models on the fly
Globals.ckpt_convert = True
# logging tokenization everywhere
Globals.log_tokenization = False
def global_config_file() -> Path:
return Path(Globals.root, Globals.config_dir, Globals.models_file)
def global_config_dir() -> Path:
return Path(Globals.root, Globals.config_dir)
def global_models_dir() -> Path:
return Path(Globals.root, Globals.models_dir)
def global_autoscan_dir() -> Path:
return Path(Globals.root, Globals.autoscan_dir)
def global_converted_ckpts_dir() -> Path:
return Path(global_models_dir(), Globals.converted_ckpts_dir)
def global_set_root(root_dir: Union[str, Path]):
Globals.root = root_dir
def global_resolve_path(path: Union[str,Path]):
if path is None:
return None
return Path(Globals.root,path).resolve()
def global_cache_dir(subdir: Union[str, Path] = "") -> Path:
"""
Returns Path to the model cache directory. If a subdirectory
is provided, it will be appended to the end of the path, allowing
for Hugging Face-style conventions. Currently, Hugging Face has
moved all models into the "hub" subfolder, so for any pretrained
HF model, use:
global_cache_dir('hub')
The legacy location for transformers used to be global_cache_dir('transformers')
and global_cache_dir('diffusers') for diffusers.
"""
home: str = os.getenv("HF_HOME")
if home is None:
home = os.getenv("XDG_CACHE_HOME")
if home is not None:
# Set `home` to $XDG_CACHE_HOME/huggingface, which is the default location mentioned in Hugging Face Hub Client Library.
# See: https://huggingface.co/docs/huggingface_hub/main/en/package_reference/environment_variables#xdgcachehome
home += os.sep + "huggingface"
if home is not None:
return Path(home, subdir)
else:
return Path(Globals.root, "models", subdir)
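With this globals module deleted, the equivalent lookups go through the new config object. A sketch of the substitutions applied throughout this commit:

from invokeai.app.services.config import get_invokeai_config

config = get_invokeai_config()
config.root             # replaces Globals.root
config.cache_dir        # replaces global_cache_dir("hub")
config.model_conf_path  # replaces global_config_file()
config.legacy_conf_path # replaces global_config_dir() / "stable-diffusion"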

View File

@ -6,7 +6,7 @@ be suppressed or deferred
"""
import numpy as np
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import Globals
from invokeai.app.services.config import get_invokeai_config
class PatchMatch:
"""
@ -21,9 +21,10 @@ class PatchMatch:
@classmethod
def _load_patch_match(self):
config = get_invokeai_config()
if self.tried_load:
return
if Globals.try_patchmatch:
if config.try_patchmatch:
from patchmatch import patch_match as pm
if pm.patchmatch_available:

View File

@ -33,12 +33,11 @@ from PIL import Image, ImageOps
from transformers import AutoProcessor, CLIPSegForImageSegmentation
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import global_cache_dir
from invokeai.app.services.config import get_invokeai_config
CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
CLIPSEG_SIZE = 352
class SegmentedGrayscale(object):
def __init__(self, image: Image, heatmap: torch.Tensor):
self.heatmap = heatmap
@ -84,14 +83,15 @@ class Txt2Mask(object):
def __init__(self, device="cpu", refined=False):
logger.info("Initializing clipseg model for text to mask inference")
config = get_invokeai_config()
# BUG: we are not doing anything with the device option at this time
self.device = device
self.processor = AutoProcessor.from_pretrained(
CLIPSEG_MODEL, cache_dir=global_cache_dir("hub")
CLIPSEG_MODEL, cache_dir=config.cache_dir
)
self.model = CLIPSegForImageSegmentation.from_pretrained(
CLIPSEG_MODEL, cache_dir=global_cache_dir("hub")
CLIPSEG_MODEL, cache_dir=config.cache_dir
)
@torch.no_grad()

View File

@ -26,7 +26,7 @@ import torch
from safetensors.torch import load_file
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import global_cache_dir, global_config_dir
from invokeai.app.services.config import get_invokeai_config
from .model_manager import ModelManager, SDLegacyType
from .model_cache import ModelCache
@ -76,7 +76,6 @@ from transformers import (
from ..stable_diffusion import StableDiffusionGeneratorPipeline
def shave_segments(path, n_shave_prefix_segments=1):
"""
Removes segments. Positive values shave the first segments, negative shave the last segments.
@ -858,7 +857,7 @@ def convert_ldm_bert_checkpoint(checkpoint, config):
def convert_ldm_clip_checkpoint(checkpoint):
text_model = CLIPTextModel.from_pretrained(
"openai/clip-vit-large-patch14", cache_dir=global_cache_dir("hub")
"openai/clip-vit-large-patch14", cache_dir=get_invokeai_config().cache_dir
)
keys = list(checkpoint.keys())
@ -913,7 +912,7 @@ textenc_pattern = re.compile("|".join(protected.keys()))
def convert_paint_by_example_checkpoint(checkpoint):
cache_dir = global_cache_dir("hub")
cache_dir = get_invokeai_config().cache_dir
config = CLIPVisionConfig.from_pretrained(
"openai/clip-vit-large-patch14", cache_dir=cache_dir
)
@ -985,7 +984,7 @@ def convert_paint_by_example_checkpoint(checkpoint):
def convert_open_clip_checkpoint(checkpoint):
cache_dir = global_cache_dir("hub")
cache_dir = get_invokeai_config().cache_dir
text_model = CLIPTextModel.from_pretrained(
"stabilityai/stable-diffusion-2", subfolder="text_encoder", cache_dir=cache_dir
)
@ -1121,7 +1120,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
:param vae: A diffusers VAE to load into the pipeline.
:param vae_path: Path to a checkpoint VAE that will be converted into diffusers and loaded into the pipeline.
"""
config = get_invokeai_config()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
verbosity = dlogging.get_verbosity()
@ -1134,7 +1133,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
else:
checkpoint = load_file(checkpoint_path)
cache_dir = global_cache_dir("hub")
cache_dir = config.cache_dir
pipeline_class = (
StableDiffusionGeneratorPipeline
if return_generator_pipeline
@ -1158,25 +1157,23 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
if model_type == SDLegacyType.V2_v:
original_config_file = (
global_config_dir() / "stable-diffusion" / "v2-inference-v.yaml"
config.legacy_conf_path / "v2-inference-v.yaml"
)
if global_step == 110000:
# v2.1 needs to upcast attention
upcast_attention = True
elif model_type == SDLegacyType.V2_e:
original_config_file = (
global_config_dir() / "stable-diffusion" / "v2-inference.yaml"
config.legacy_conf_path / "v2-inference.yaml"
)
elif model_type == SDLegacyType.V1_INPAINT:
original_config_file = (
global_config_dir()
/ "stable-diffusion"
/ "v1-inpainting-inference.yaml"
config.legacy_conf_path / "v1-inpainting-inference.yaml"
)
elif model_type == SDLegacyType.V1:
original_config_file = (
global_config_dir() / "stable-diffusion" / "v1-inference.yaml"
config.legacy_conf_path / "v1-inference.yaml"
)
else:
@ -1323,7 +1320,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
)
safety_checker = StableDiffusionSafetyChecker.from_pretrained(
"CompVis/stable-diffusion-safety-checker",
cache_dir=global_cache_dir("hub"),
cache_dir=config.cache_dir,
)
feature_extractor = AutoFeatureExtractor.from_pretrained(
"CompVis/stable-diffusion-safety-checker", cache_dir=cache_dir

View File

@ -25,27 +25,25 @@ import warnings
from contextlib import suppress
from enum import Enum
from pathlib import Path
from typing import Dict, Sequence, Union, Tuple, types, Optional, List, Type, Any
from typing import Dict, Sequence, Union, types, Optional, List, Type, Any
import torch
import safetensors.torch
from diffusers import DiffusionPipeline, SchedulerMixin, ConfigMixin
from diffusers import logging as diffusers_logging
from huggingface_hub import HfApi, scan_cache_dir
from picklescan.scanner import scan_file_path
from pydantic import BaseModel
from transformers import logging as transformers_logging
import invokeai.backend.util.logging as logger
from ..globals import global_cache_dir
from invokeai.app.services.config import get_invokeai_config
def get_model_path(repo_id_or_path: str):
globals = get_invokeai_config()
if os.path.exists(repo_id_or_path):
return repo_id_or_path
cache = scan_cache_dir(global_cache_dir("hub"))
cache = scan_cache_dir(globals.cache_dir)
for repo in cache.repos:
if repo.repo_id != repo_id_or_path:
continue
@ -234,7 +232,7 @@ class DiffusersModelInfo(ModelInfoBase):
model = self.child_types[child_type].from_pretrained(
self.repo_id_or_path,
subfolder=child_type.value,
cache_dir=global_cache_dir('hub'),
cache_dir=get_invokeai_config().cache_dir,
torch_dtype=torch_dtype,
variant=variant,
)
@ -248,7 +246,7 @@ class DiffusersModelInfo(ModelInfoBase):
return model
def get_pipeline(self, **kwrags):
def get_pipeline(self, **kwargs):
return DiffusionPipeline.from_pretrained(
self.repo_id_or_path,
**kwargs,
@ -349,7 +347,7 @@ class ClassifierModelInfo(ModelInfoBase):
model = self.child_types[child_type].from_pretrained(
self.repo_id_or_path,
subfolder=child_type.value,
cache_dir=global_cache_dir('hub'),
cache_dir=get_invokeai_config().cache_dir,
torch_dtype=torch_dtype,
)
# calc more accurate size
@ -394,7 +392,7 @@ class VaeModelInfo(ModelInfoBase):
model = self.vae_type.from_pretrained(
self.repo_id_or_path,
cache_dir=global_cache_dir('hub'),
cache_dir=get_invokeai_config().cache_dir,
torch_dtype=torch_dtype,
)
# calc more accurate size

View File

@ -149,8 +149,7 @@ from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import (Globals, global_cache_dir,
global_resolve_path)
from invokeai.app.services.config import get_invokeai_config
from invokeai.backend.util import download_with_resume
from ..util import CUDA_DEVICE
@ -226,7 +225,8 @@ class ModelManager(object):
# check config version number and update on disk/RAM if necessary
self._update_config_file_version()
self.globals = get_invokeai_config()
self.logger = logger
self.cache = ModelCache(
max_cache_size=max_cache_size,
execution_device = device_type,
@ -235,7 +235,6 @@ class ModelManager(object):
logger = logger,
)
self.cache_keys = dict()
self.logger = logger
def model_exists(
self,
@ -304,12 +303,6 @@ class ModelManager(object):
# raises an InvalidModelError
"""
# Commented-out workaround for callers that use "type/name" as the model name
# because they haven't adjusted to the new return format of `list_models()`
# if "/" in model_name:
# model_key = model_name
# else:
model_key = self.create_key(model_name, model_type)
if model_key not in self.config:
raise InvalidModelError(
@ -326,13 +319,15 @@ class ModelManager(object):
if mconfig.format in ["ckpt", "safetensors"]:
location = self.convert_ckpt_and_cache(mconfig)
else:
location = global_resolve_path(mconfig.get('path')) or mconfig.get('repo_id')
location = self.globals.root_dir / mconfig.get('path') or mconfig.get('repo_id')
elif p := mconfig.get('path'):
location = self.globals.root_dir / p
elif r := mconfig.get('repo_id'):
location = r
elif w := mconfig.get('weights'):
location = self.globals.root_dir / w
else:
location = global_resolve_path(
mconfig.get('path')) \
or mconfig.get('repo_id') \
or global_resolve_path(mconfig.get('weights')
)
location = None
revision = mconfig.get('revision')
hash = self.cache.model_hash(location, revision)
@ -423,7 +418,7 @@ class ModelManager(object):
"""
# if we are converting legacy files automatically, then
# there are no legacy ckpts!
if Globals.ckpt_convert:
if self.globals.ckpt_convert:
return False
info = self.model_info(model_name, model_type)
if "weights" in info and info["weights"].endswith((".ckpt", ".safetensors")):
@ -862,25 +857,16 @@ class ModelManager(object):
model_type = self.probe_model_type(checkpoint)
if model_type == SDLegacyType.V1:
self.logger.debug("SD-v1 model detected")
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v1-inference.yaml"
)
model_config_file = self.globals.legacy_conf_path / "v1-inference.yaml"
elif model_type == SDLegacyType.V1_INPAINT:
self.logger.debug("SD-v1 inpainting model detected")
model_config_file = Path(
Globals.root,
"configs/stable-diffusion/v1-inpainting-inference.yaml",
)
model_config_file = self.globals.legacy_conf_path / "v1-inpainting-inference.yaml",
elif model_type == SDLegacyType.V2_v:
self.logger.debug("SD-v2-v model detected")
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
)
model_config_file = self.globals.legacy_conf_path / "v2-inference-v.yaml"
elif model_type == SDLegacyType.V2_e:
self.logger.debug("SD-v2-e model detected")
model_config_file = Path(
Globals.root, "configs/stable-diffusion/v2-inference.yaml"
)
model_config_file = self.globals.legacy_conf_path / "v2-inference.yaml"
elif model_type == SDLegacyType.V2:
self.logger.warning(
f"{thing} is a V2 checkpoint file, but its parameterization cannot be determined. Please provide configuration file path."
@ -907,9 +893,7 @@ class ModelManager(object):
self.logger.debug(f"Using VAE file {vae_path.name}")
vae = None if vae_path else dict(repo_id="stabilityai/sd-vae-ft-mse")
diffuser_path = Path(
Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem
)
diffuser_path = self.globals.converted_ckpts_dir / model_path.stem
with SilenceWarnings():
model_name = self.convert_and_import(
model_path,
@ -930,9 +914,9 @@ class ModelManager(object):
diffusers, cache it to disk, and return Path to converted
file. If already on disk then just returns Path.
"""
weights = global_resolve_path(mconfig.weights)
config_file = global_resolve_path(mconfig.config)
diffusers_path = global_resolve_path(Path('models',Globals.converted_ckpts_dir)) / weights.stem
weights = self.globals.root_dir / mconfig.weights
config_file = self.globals.root_dir / mconfig.config
diffusers_path = self.globals.converted_ckpts_dir / weights.stem
# return cached version if it exists
if diffusers_path.exists():
@ -949,7 +933,7 @@ class ModelManager(object):
extract_ema=True,
original_config_file=config_file,
vae=vae_model,
vae_path=str(global_resolve_path(vae_ckpt_path)) if vae_ckpt_path else None,
vae_path=str(self.globals.root_dir / vae_ckpt_path) if vae_ckpt_path else None,
scan_needed=True,
)
return diffusers_path
@ -960,9 +944,10 @@ class ModelManager(object):
object, cache it to disk, and return Path to converted
file. If already on disk then just returns Path.
"""
weights_file = global_resolve_path(mconfig.weights)
config_file = global_resolve_path(mconfig.config)
diffusers_path = global_resolve_path(Path('models',Globals.converted_ckpts_dir)) / weights_file.stem
root = self.globals.root_dir
weights_file = root / mconfig.weights
config_file = root / mconfig.config
diffusers_path = self.globals.converted_ckpts_dir / weights_file.stem
image_size = mconfig.get('width') or mconfig.get('height') or 512
# return cached version if it exists
@ -1018,7 +1003,9 @@ class ModelManager(object):
# 3. If mconfig has a vae dict, then we use it as the diffusers-style vae
if vae_config and isinstance(vae_config,DictConfig):
vae_diffusers_location = global_resolve_path(vae_config.get('path')) or vae_config.get('repo_id')
vae_diffusers_location = self.globals.root_dir / vae_config.get('path') \
if vae_config.get('path') \
else vae_config.get('repo_id')
# 4. Otherwise, we use stabilityai/sd-vae-ft-mse "because it works"
else:
@ -1072,7 +1059,9 @@ class ModelManager(object):
# will be built into the model rather than tacked on afterward via the config file
vae_model = None
if vae:
vae_location = global_resolve_path(vae.get('path')) or vae.get('repo_id')
vae_location = self.globals.root_dir / vae.get('path') \
if vae.get('path') \
else vae.get('repo_id')
vae_model = self.cache.get_model(vae_location, SDModelType.Vae).model
vae_path = None
convert_ckpt_to_diffusers(
@ -1140,6 +1129,7 @@ class ModelManager(object):
yaml_str = OmegaConf.to_yaml(self.config)
config_file_path = conf_file or self.config_path
assert config_file_path is not None,'no config file path to write to'
config_file_path = self.globals.root_dir / config_file_path
tmpfile = os.path.join(os.path.dirname(config_file_path), "new_config.tmp")
with open(tmpfile, "w", encoding="utf-8") as outfile:
outfile.write(self.preamble())
@ -1160,7 +1150,7 @@ class ModelManager(object):
@classmethod
def _delete_model_from_cache(cls,repo_id):
cache_info = scan_cache_dir(global_cache_dir("hub"))
cache_info = scan_cache_dir(get_invokeai_config().cache_dir)
# I'm sure there is a way to do this with comprehensions
# but the code quickly became incomprehensible!
@ -1177,9 +1167,10 @@ class ModelManager(object):
@staticmethod
def _abs_path(path: str | Path) -> Path:
globals = get_invokeai_config()
if path is None or Path(path).is_absolute():
return path
return Path(Globals.root, path).resolve()
return Path(globals.root_dir, path).resolve()
# This is not the same as global_resolve_path(), which prepends
# Globals.root.
@ -1188,15 +1179,11 @@ class ModelManager(object):
) -> Optional[Path]:
resolved_path = None
if str(source).startswith(("http:", "https:", "ftp:")):
dest_directory = Path(dest_directory)
if not dest_directory.is_absolute():
dest_directory = Globals.root / dest_directory
dest_directory = self.globals.root_dir / dest_directory
dest_directory.mkdir(parents=True, exist_ok=True)
resolved_path = download_with_resume(str(source), dest_directory)
else:
if not os.path.isabs(source):
source = os.path.join(Globals.root, source)
resolved_path = Path(source)
resolved_path = self.globals.root_dir / source
return resolved_path
def _update_config_file_version(self):

View File

@ -17,67 +17,59 @@ from compel.prompt_parser import (
FlattenedPrompt,
Fragment,
PromptParser,
Conjunction,
)
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import Globals
from invokeai.app.services.config import get_invokeai_config
from ..stable_diffusion import InvokeAIDiffuserComponent
from ..util import torch_dtype
def get_uc_and_c_and_ec(
prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False
):
def get_uc_and_c_and_ec(prompt_string,
model: InvokeAIDiffuserComponent,
log_tokens=False, skip_normalize_legacy_blend=False):
# lazy-load any deferred textual inversions.
# this might take a couple of seconds the first time a textual inversion is used.
model.textual_inversion_manager.create_deferred_token_ids_for_any_trigger_terms(
prompt_string
)
model.textual_inversion_manager.create_deferred_token_ids_for_any_trigger_terms(prompt_string)
tokenizer = model.tokenizer
compel = Compel(
tokenizer=tokenizer,
text_encoder=model.text_encoder,
textual_inversion_manager=model.textual_inversion_manager,
dtype_for_device_getter=torch_dtype,
truncate_long_prompts=False
)
compel = Compel(tokenizer=model.tokenizer,
text_encoder=model.text_encoder,
textual_inversion_manager=model.textual_inversion_manager,
dtype_for_device_getter=torch_dtype,
truncate_long_prompts=False,
)
config = get_invokeai_config()
# get rid of any newline characters
prompt_string = prompt_string.replace("\n", " ")
(
positive_prompt_string,
negative_prompt_string,
) = split_prompt_to_positive_and_negative(prompt_string)
legacy_blend = try_parse_legacy_blend(
positive_prompt_string, skip_normalize_legacy_blend
)
positive_prompt: Union[FlattenedPrompt, Blend]
if legacy_blend is not None:
positive_prompt = legacy_blend
else:
positive_prompt = Compel.parse_prompt_string(positive_prompt_string)
negative_prompt: Union[FlattenedPrompt, Blend] = Compel.parse_prompt_string(
negative_prompt_string
)
positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string)
if log_tokens or getattr(Globals, "log_tokenization", False):
log_tokenization(positive_prompt, negative_prompt, tokenizer=tokenizer)
legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend)
positive_conjunction: Conjunction
if legacy_blend is not None:
positive_conjunction = legacy_blend
else:
positive_conjunction = Compel.parse_prompt_string(positive_prompt_string)
positive_prompt = positive_conjunction.prompts[0]
negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
negative_prompt: FlattenedPrompt | Blend = negative_conjunction.prompts[0]
tokens_count = get_max_token_count(model.tokenizer, positive_prompt)
if log_tokens or config.log_tokenization:
log_tokenization(positive_prompt, negative_prompt, tokenizer=model.tokenizer)
c, options = compel.build_conditioning_tensor_for_prompt_object(positive_prompt)
uc, _ = compel.build_conditioning_tensor_for_prompt_object(negative_prompt)
[c, uc] = compel.pad_conditioning_tensors_to_same_length([c, uc])
tokens_count = get_max_token_count(tokenizer, positive_prompt)
ec = InvokeAIDiffuserComponent.ExtraConditioningInfo(
tokens_count_including_eos_bos=tokens_count,
cross_attention_control_args=options.get("cross_attention_control", None),
)
ec = InvokeAIDiffuserComponent.ExtraConditioningInfo(tokens_count_including_eos_bos=tokens_count,
cross_attention_control_args=options.get(
'cross_attention_control', None))
return uc, c, ec
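A hedged usage sketch (pipeline stands in for a loaded model object exposing the tokenizer, text_encoder, and textual_inversion_manager attributes used above):

# hypothetical call
uc, c, ec = get_uc_and_c_and_ec("a watercolor fox in the snow", model=pipeline)
# c and uc are conditioning tensors padded to the same length; ec carries
# the token count and any cross-attention control arguments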
def get_prompt_structure(
prompt_string, skip_normalize_legacy_blend: bool = False
) -> (Union[FlattenedPrompt, Blend], FlattenedPrompt):
@ -88,18 +80,17 @@ def get_prompt_structure(
legacy_blend = try_parse_legacy_blend(
positive_prompt_string, skip_normalize_legacy_blend
)
positive_prompt: Union[FlattenedPrompt, Blend]
positive_prompt: Conjunction
if legacy_blend is not None:
positive_prompt = legacy_blend
positive_conjunction = legacy_blend
else:
positive_prompt = Compel.parse_prompt_string(positive_prompt_string)
negative_prompt: Union[FlattenedPrompt, Blend] = Compel.parse_prompt_string(
negative_prompt_string
)
positive_conjunction = Compel.parse_prompt_string(positive_prompt_string)
positive_prompt = positive_conjunction.prompts[0]
negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
negative_prompt: FlattenedPrompt|Blend = negative_conjunction.prompts[0]
return positive_prompt, negative_prompt
def get_max_token_count(
tokenizer, prompt: Union[FlattenedPrompt, Blend], truncate_if_too_long=False
) -> int:
@ -246,22 +237,21 @@ def log_tokenization_for_text(text, tokenizer, display_label=None, truncate_if_t
logger.info(f"[TOKENLOG] Tokens Discarded ({totalTokens - usedTokens}):")
logger.debug(f"{discarded}\x1b[0m")
def try_parse_legacy_blend(text: str, skip_normalize: bool = False) -> Optional[Blend]:
def try_parse_legacy_blend(text: str, skip_normalize: bool = False) -> Optional[Conjunction]:
weighted_subprompts = split_weighted_subprompts(text, skip_normalize=skip_normalize)
if len(weighted_subprompts) <= 1:
return None
strings = [x[0] for x in weighted_subprompts]
weights = [x[1] for x in weighted_subprompts]
pp = PromptParser()
parsed_conjunctions = [pp.parse_conjunction(x) for x in strings]
flattened_prompts = [x.prompts[0] for x in parsed_conjunctions]
return Blend(
prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize
)
flattened_prompts = []
weights = []
for i, x in enumerate(parsed_conjunctions):
if len(x.prompts)>0:
flattened_prompts.append(x.prompts[0])
weights.append(weighted_subprompts[i][1])
return Conjunction([Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize)])
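An illustration of the new return shape, assuming the colon-weighted legacy syntax parsed by split_weighted_subprompts below:

# hypothetical: two weighted subprompts become one Blend inside a Conjunction
conj = try_parse_legacy_blend("a sunny meadow:1.0 a stormy sky:0.5")
# conj.prompts[0] is a Blend over the two parsed prompts with weights
# [1.0, 0.5]; a prompt with a single subprompt returns None instead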
def split_weighted_subprompts(text, skip_normalize=False) -> list:
"""

View File

@ -6,7 +6,7 @@ import numpy as np
import torch
import invokeai.backend.util.logging as logger
from ..globals import Globals
from invokeai.app.services.config import get_invokeai_config
pretrained_model_url = (
"https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth"
@ -17,11 +17,11 @@ class CodeFormerRestoration:
def __init__(
self, codeformer_dir="models/codeformer", codeformer_model_path="codeformer.pth"
) -> None:
if not os.path.isabs(codeformer_dir):
codeformer_dir = os.path.join(Globals.root, codeformer_dir)
self.model_path = os.path.join(codeformer_dir, codeformer_model_path)
self.codeformer_model_exists = os.path.isfile(self.model_path)
self.globals = get_invokeai_config()
codeformer_dir = self.globals.root_dir / codeformer_dir
self.model_path = codeformer_dir / codeformer_model_path
self.codeformer_model_exists = self.model_path.exists()
if not self.codeformer_model_exists:
logger.error("NOT FOUND: CodeFormer model not found at " + self.model_path)
@ -71,9 +71,7 @@ class CodeFormerRestoration:
upscale_factor=1,
use_parse=True,
device=device,
model_rootpath=os.path.join(
Globals.root, "models", "gfpgan", "weights"
),
model_rootpath=self.globals.root_dir / "models" / "gfpgan" / "weights"
)
face_helper.clean_all()
face_helper.read_image(bgr_image_array)

View File

@ -7,14 +7,13 @@ import torch
from PIL import Image
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import Globals
from invokeai.app.services.config import get_invokeai_config
class GFPGAN:
def __init__(self, gfpgan_model_path="models/gfpgan/GFPGANv1.4.pth") -> None:
self.globals = get_invokeai_config()
if not os.path.isabs(gfpgan_model_path):
gfpgan_model_path = os.path.abspath(
os.path.join(Globals.root, gfpgan_model_path)
)
gfpgan_model_path = self.globals.root_dir / gfpgan_model_path
self.model_path = gfpgan_model_path
self.gfpgan_model_exists = os.path.isfile(self.model_path)
@ -33,7 +32,7 @@ class GFPGAN:
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
cwd = os.getcwd()
os.chdir(os.path.join(Globals.root, "models"))
os.chdir(self.globals.root_dir / 'models')
try:
from gfpgan import GFPGANer

View File

@ -1,4 +1,3 @@
import os
import warnings
import numpy as np
@ -7,7 +6,8 @@ from PIL import Image
from PIL.Image import Image as ImageType
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import Globals
from invokeai.app.services.config import get_invokeai_config
config = get_invokeai_config()
class ESRGAN:
def __init__(self, bg_tile_size=400) -> None:
@ -30,12 +30,8 @@ class ESRGAN:
upscale=4,
act_type="prelu",
)
model_path = os.path.join(
Globals.root, "models/realesrgan/realesr-general-x4v3.pth"
)
wdn_model_path = os.path.join(
Globals.root, "models/realesrgan/realesr-general-wdn-x4v3.pth"
)
model_path = config.root_dir / "models/realesrgan/realesr-general-x4v3.pth"
wdn_model_path = config.root_dir / "models/realesrgan/realesr-general-wdn-x4v3.pth"
scale = 4
bg_upsampler = RealESRGANer(

View File

@ -15,7 +15,7 @@ from transformers import AutoFeatureExtractor
import invokeai.assets.web as web_assets
import invokeai.backend.util.logging as logger
from .globals import global_cache_dir
from invokeai.app.services.config import get_invokeai_config
from .util import CPU_DEVICE
class SafetyChecker(object):
@ -26,10 +26,11 @@ class SafetyChecker(object):
caution = Image.open(path)
self.caution_img = caution.resize((caution.width // 2, caution.height // 2))
self.device = device
config = get_invokeai_config()
try:
safety_model_id = "CompVis/stable-diffusion-safety-checker"
safety_model_path = global_cache_dir("hub")
safety_model_path = config.cache_dir
self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
safety_model_id,
local_files_only=True,
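The hub cache now comes from config.cache_dir instead of global_cache_dir("hub"). A load sketch, assuming the checker weights are already cached (local_files_only=True raises otherwise):

    from diffusers.pipelines.stable_diffusion.safety_checker import (
        StableDiffusionSafetyChecker,
    )
    from invokeai.app.services.config import get_invokeai_config

    config = get_invokeai_config()
    checker = StableDiffusionSafetyChecker.from_pretrained(
        "CompVis/stable-diffusion-safety-checker",
        cache_dir=config.cache_dir,
        local_files_only=True,
    )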

View File

@ -18,15 +18,15 @@ from huggingface_hub import (
)
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import Globals
from invokeai.app.services.config import get_invokeai_config
class HuggingFaceConceptsLibrary(object):
def __init__(self, root=None):
"""
Initialize the Concepts object. May optionally pass a root directory.
"""
self.root = root or Globals.root
self.config = get_invokeai_config()
self.root = root or self.config.root
self.hf_api = HfApi()
self.local_concepts = dict()
self.concept_list = None
@ -58,7 +58,7 @@ class HuggingFaceConceptsLibrary(object):
self.concept_list.extend(list(local_concepts_to_add))
return self.concept_list
return self.concept_list
elif Globals.internet_available is True:
elif self.config.internet_available is True:
try:
models = self.hf_api.list_models(
filter=ModelFilter(model_name="sd-concepts-library/")

View File

@ -33,8 +33,7 @@ from torchvision.transforms.functional import resize as tv_resize
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
from typing_extensions import ParamSpec
from invokeai.backend.globals import Globals
from invokeai.app.services.config import get_invokeai_config
from ..util import CPU_DEVICE, normalize_device
from .diffusion import (
AttentionMapSaver,
@ -44,7 +43,6 @@ from .diffusion import (
from .offloading import FullyLoadedModelGroup, LazilyLoadedModelGroup, ModelGroup
from .textual_inversion_manager import TextualInversionManager
@dataclass
class PipelineIntermediateState:
run_id: str
@ -348,10 +346,11 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
"""
if xformers is available, use it, otherwise use sliced attention.
"""
config = get_invokeai_config()
if (
torch.cuda.is_available()
and is_xformers_available()
and not Globals.disable_xformers
and not config.disable_xformers
):
self.enable_xformers_memory_efficient_attention()
else:
@ -548,8 +547,9 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
additional_guidance = []
extra_conditioning_info = conditioning_data.extra
with self.invokeai_diffuser.custom_attention_context(
extra_conditioning_info=extra_conditioning_info,
step_count=len(self.scheduler.timesteps),
self.invokeai_diffuser.model,
extra_conditioning_info=extra_conditioning_info,
step_count=len(self.scheduler.timesteps),
):
yield PipelineIntermediateState(
run_id=run_id,
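The xformers toggle moves from Globals.disable_xformers to the config object. The gating logic in isolation, sketched against a generic diffusers pipeline `pipe` (an assumed name):

    import torch
    from diffusers.utils import is_xformers_available
    from invokeai.app.services.config import get_invokeai_config

    config = get_invokeai_config()
    if torch.cuda.is_available() and is_xformers_available() and not config.disable_xformers:
        pipe.enable_xformers_memory_efficient_attention()
    else:
        pipe.enable_attention_slicing()  # fall back to sliced attention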

View File

@ -10,6 +10,7 @@ import diffusers
import psutil
import torch
from compel.cross_attention_control import Arguments
from diffusers.models.unet_2d_condition import UNet2DConditionModel
from diffusers.models.attention_processor import AttentionProcessor
from torch import nn
@ -352,8 +353,7 @@ def restore_default_cross_attention(
else:
remove_attention_function(model)
def override_cross_attention(model, context: Context, is_running_diffusers=False):
def setup_cross_attention_control_attention_processors(unet: UNet2DConditionModel, context: Context):
"""
Inject attention parameters and functions into the passed in model to enable cross attention editing.
@ -372,37 +372,22 @@ def override_cross_attention(model, context: Context, is_running_diffusers=False
indices = torch.arange(max_length, dtype=torch.long)
for name, a0, a1, b0, b1 in context.arguments.edit_opcodes:
if b0 < max_length:
if name == "equal": # or (name == "replace" and a1 - a0 == b1 - b0):
if name == "equal":# or (name == "replace" and a1 - a0 == b1 - b0):
# these tokens have not been edited
indices[b0:b1] = indices_target[a0:a1]
mask[b0:b1] = 1
context.cross_attention_mask = mask.to(device)
context.cross_attention_index_map = indices.to(device)
if is_running_diffusers:
unet = model
old_attn_processors = unet.attn_processors
if torch.backends.mps.is_available():
# see note in StableDiffusionGeneratorPipeline.__init__ about borked slicing on MPS
unet.set_attn_processor(SwapCrossAttnProcessor())
else:
# try to re-use an existing slice size
default_slice_size = 4
slice_size = next(
(
p.slice_size
for p in old_attn_processors.values()
if type(p) is SlicedAttnProcessor
),
default_slice_size,
)
unet.set_attn_processor(SlicedSwapCrossAttnProcesser(slice_size=slice_size))
return old_attn_processors
old_attn_processors = unet.attn_processors
if torch.backends.mps.is_available():
# see note in StableDiffusionGeneratorPipeline.__init__ about borked slicing on MPS
unet.set_attn_processor(SwapCrossAttnProcessor())
else:
context.register_cross_attention_modules(model)
inject_attention_function(model, context)
return None
# try to re-use an existing slice size
default_slice_size = 4
slice_size = next(
(p.slice_size for p in old_attn_processors.values() if type(p) is SlicedAttnProcessor),
default_slice_size,
)
unet.set_attn_processor(SlicedSwapCrossAttnProcesser(slice_size=slice_size))
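The slice-size reuse idiom above, pulled out for readability: keep the slice_size of any existing SlicedAttnProcessor so the swap processor slices the same way, else fall back to 4. A sketch:

    from diffusers.models.attention_processor import SlicedAttnProcessor

    def reuse_slice_size(attn_processors: dict, default: int = 4) -> int:
        # attn_processors is the mapping returned by unet.attn_processors
        return next(
            (p.slice_size for p in attn_processors.values() if type(p) is SlicedAttnProcessor),
            default,
        )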
def get_cross_attention_modules(
model, which: CrossAttentionType

View File

@ -5,11 +5,12 @@ from typing import Any, Callable, Dict, Optional, Union
import numpy as np
import torch
from diffusers import UNet2DConditionModel
from diffusers.models.attention_processor import AttentionProcessor
from typing_extensions import TypeAlias
import invokeai.backend.util.logging as logger
from invokeai.backend.globals import Globals
from invokeai.app.services.config import get_invokeai_config
from .cross_attention_control import (
Arguments,
@ -17,8 +18,8 @@ from .cross_attention_control import (
CrossAttentionType,
SwapCrossAttnContext,
get_cross_attention_modules,
override_cross_attention,
restore_default_cross_attention,
setup_cross_attention_control_attention_processors,
)
from .cross_attention_map_saving import AttentionMapSaver
@ -31,7 +32,6 @@ ModelForwardCallback: TypeAlias = Union[
Callable[[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor],
]
@dataclass(frozen=True)
class PostprocessingSettings:
threshold: float
@ -72,31 +72,43 @@ class InvokeAIDiffuserComponent:
:param model: the unet model to pass through to cross attention control
:param model_forward_callback: a lambda with arguments (x, sigma, conditioning_to_apply). will be called repeatedly. most likely, this should simply call model.forward(x, sigma, conditioning)
"""
config = get_invokeai_config()
self.conditioning = None
self.model = model
self.is_running_diffusers = is_running_diffusers
self.model_forward_callback = model_forward_callback
self.cross_attention_control_context = None
self.sequential_guidance = Globals.sequential_guidance
self.sequential_guidance = config.sequential_guidance
@classmethod
@contextmanager
def custom_attention_context(
self, extra_conditioning_info: Optional[ExtraConditioningInfo], step_count: int
cls,
unet: UNet2DConditionModel, # note: also may futz with the text encoder depending on requested LoRAs
extra_conditioning_info: Optional[ExtraConditioningInfo],
step_count: int
):
do_swap = (
extra_conditioning_info is not None
and extra_conditioning_info.wants_cross_attention_control
)
old_attn_processor = None
if do_swap:
old_attn_processor = self.override_cross_attention(
extra_conditioning_info, step_count=step_count
)
old_attn_processors = None
if extra_conditioning_info and (
extra_conditioning_info.wants_cross_attention_control
):
old_attn_processors = unet.attn_processors
# swap in attention processors that implement cross-attention control
cross_attention_control_context = Context(
arguments=extra_conditioning_info.cross_attention_control_args,
step_count=step_count,
)
setup_cross_attention_control_attention_processors(
unet,
cross_attention_control_context,
)
try:
yield None
finally:
if old_attn_processor is not None:
self.restore_default_cross_attention(old_attn_processor)
if old_attn_processors is not None:
unet.set_attn_processor(old_attn_processors)
# TODO resuscitate attention map saving
# self.remove_attention_map_saving()
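Caller-side, the new classmethod takes the unet explicitly and no longer hands old processors back to the caller; restoration happens in the finally clause. A usage sketch with assumed surrounding names:

    with InvokeAIDiffuserComponent.custom_attention_context(
        unet,  # a UNet2DConditionModel
        extra_conditioning_info=extra_conditioning_info,
        step_count=len(scheduler.timesteps),
    ):
        # the denoising loop runs here with swap processors installed;
        # the original attn processors are reinstated on exit, even on error
        ...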

View File

@ -9,7 +9,8 @@ SCHEDULER_MAP = dict(
deis=(DEISMultistepScheduler, dict()),
lms=(LMSDiscreteScheduler, dict()),
pndm=(PNDMScheduler, dict()),
heun=(HeunDiscreteScheduler, dict()),
heun=(HeunDiscreteScheduler, dict(use_karras_sigmas=False)),
heun_k=(HeunDiscreteScheduler, dict(use_karras_sigmas=True)),
euler=(EulerDiscreteScheduler, dict(use_karras_sigmas=False)),
euler_k=(EulerDiscreteScheduler, dict(use_karras_sigmas=True)),
euler_a=(EulerAncestralDiscreteScheduler, dict()),
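Each SCHEDULER_MAP entry pairs a diffusers scheduler class with its keyword overrides, so the _k (karras-sigma) variants reuse the same class. A consumption sketch, with `pipe` an assumed pipeline:

    scheduler_class, extra_kwargs = SCHEDULER_MAP["heun_k"]
    pipe.scheduler = scheduler_class.from_config(pipe.scheduler.config, **extra_kwargs)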

View File

@ -7,7 +7,6 @@
This is the backend to "textual_inversion.py"
"""
import argparse
import logging
import math
import os
@ -47,8 +46,7 @@ from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
# invokeai stuff
from ..args import ArgFormatter, PagingArgumentParser
from ..globals import Globals, global_cache_dir
from invokeai.app.services.config import InvokeAIAppConfig, PagingArgumentParser
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
PIL_INTERPOLATION = {
@ -90,8 +88,9 @@ def save_progress(
def parse_args():
config = InvokeAIAppConfig(argv=[])
parser = PagingArgumentParser(
description="Textual inversion training", formatter_class=ArgFormatter
description="Textual inversion training"
)
general_group = parser.add_argument_group("General")
model_group = parser.add_argument_group("Models and Paths")
@ -112,7 +111,7 @@ def parse_args():
"--root_dir",
"--root",
type=Path,
default=Globals.root,
default=config.root,
help="Path to the invokeai runtime directory",
)
general_group.add_argument(
@ -127,7 +126,7 @@ def parse_args():
general_group.add_argument(
"--output_dir",
type=Path,
default=f"{Globals.root}/text-inversion-model",
default=f"{config.root}/text-inversion-model",
help="The output directory where the model predictions and checkpoints will be written.",
)
model_group.add_argument(
@ -528,6 +527,7 @@ def get_full_repo_name(
def do_textual_inversion_training(
config: InvokeAIAppConfig,
model: str,
train_data_dir: Path,
output_dir: Path,
@ -580,7 +580,7 @@ def do_textual_inversion_training(
# setting up things the way invokeai expects them
if not os.path.isabs(output_dir):
output_dir = os.path.join(Globals.root, output_dir)
output_dir = config.root / output_dir
logging_dir = output_dir / logging_dir
@ -628,7 +628,7 @@ def do_textual_inversion_training(
elif output_dir is not None:
os.makedirs(output_dir, exist_ok=True)
models_conf = OmegaConf.load(os.path.join(Globals.root, "configs/models.yaml"))
models_conf = OmegaConf.load(config.model_conf_path)
model_conf = models_conf.get(model, None)
assert model_conf is not None, f"Unknown model: {model}"
assert (
@ -640,7 +640,7 @@ def do_textual_inversion_training(
assert (
pretrained_model_name_or_path
), f"models.yaml error: neither 'repo_id' nor 'path' is defined for {model}"
pipeline_args = dict(cache_dir=global_cache_dir("hub"))
pipeline_args = dict(cache_dir=config.cache_dir)
# Load tokenizer
if tokenizer_name:
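parse_args() now seeds its argparse defaults from a fresh InvokeAIAppConfig built with argv=[], so reading defaults does not consume the real command line. A reduced sketch, with plain argparse standing in for PagingArgumentParser:

    import argparse
    from pathlib import Path
    from invokeai.app.services.config import InvokeAIAppConfig

    config = InvokeAIAppConfig(argv=[])  # defaults only; leaves sys.argv untouched
    parser = argparse.ArgumentParser(description="Textual inversion training")
    parser.add_argument("--root_dir", "--root", type=Path, default=config.root)
    parser.add_argument("--output_dir", type=Path, default=config.root / "text-inversion-model")
    args = parser.parse_args([])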

View File

@ -4,17 +4,16 @@ from contextlib import nullcontext
import torch
from torch import autocast
from invokeai.backend.globals import Globals
from invokeai.app.services.config import get_invokeai_config
CPU_DEVICE = torch.device("cpu")
CUDA_DEVICE = torch.device("cuda")
MPS_DEVICE = torch.device("mps")
def choose_torch_device() -> torch.device:
"""Convenience routine for guessing which GPU device to run model on"""
if Globals.always_use_cpu:
config = get_invokeai_config()
if config.always_use_cpu:
return CPU_DEVICE
if torch.cuda.is_available():
return torch.device("cuda")
@ -33,7 +32,8 @@ def choose_precision(device: torch.device) -> str:
def torch_dtype(device: torch.device) -> torch.dtype:
if Globals.full_precision:
config = get_invokeai_config()
if config.full_precision:
return torch.float32
if choose_precision(device) == "float16":
return torch.float16
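Both helpers now consult the config at call time instead of a module-level global. A typical call site, with `model` an assumed nn.Module:

    device = choose_torch_device()  # returns CPU_DEVICE when config.always_use_cpu is set
    dtype = torch_dtype(device)     # float32 when config.full_precision is set
    model = model.to(device=device, dtype=dtype)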

View File

@ -2,34 +2,37 @@
"""invokeai.util.logging
Logging class for InvokeAI that produces console messages that follow
the conventions established in InvokeAI 1.X through 2.X.
Logging class for InvokeAI that produces console messages
One way to use it:
Usage:
from invokeai.backend.util.logging import InvokeAILogger
logger = InvokeAILogger.getLogger(__name__)
logger.critical('this is critical')
logger.error('this is an error')
logger.warning('this is a warning')
logger.info('this is info')
logger.debug('this is debugging')
logger = InvokeAILogger.getLogger(name='InvokeAI')  # initialization
(or)
logger = InvokeAILogger.getLogger(__name__)  # to use the module name
logger.critical('this is critical')  # critical message
logger.error('this is an error')  # error message
logger.warning('this is a warning')  # warning message
logger.info('this is info')  # info message
logger.debug('this is debugging')  # debug message
Console messages:
### this is critical
*** this is an error ***
** this is a warning
>> this is info
| this is debugging
[12-05-2023 20:13:15]::[InvokeAI]::CRITICAL --> This is a critical message [in bold red]
[12-05-2023 20:13:15]::[InvokeAI]::ERROR --> This is an error message [in red]
[12-05-2023 20:13:15]::[InvokeAI]::WARNING --> This is a warning message [in yellow]
[12-05-2023 20:13:15]::[InvokeAI]::INFO --> This is an info message [in grey]
[12-05-2023 20:13:15]::[InvokeAI]::DEBUG --> This is a debug message [in cyan]
Another way:
import invokeai.backend.util.logging as ialog
ialog.debug('this is a debugging message')
Alternate Method (in this case the logger name will be set to InvokeAI):
import invokeai.backend.util.logging as IAILogger
IAILogger.debug('this is a debugging message')
"""
import logging
# module level functions
def debug(msg, *args, **kwargs):
InvokeAILogger.getLogger().debug(msg, *args, **kwargs)
@ -42,7 +45,7 @@ def warning(msg, *args, **kwargs):
def error(msg, *args, **kwargs):
InvokeAILogger.getLogger().error(msg, *args, **kwargs)
def critical(msg, *args, **kwargs):
InvokeAILogger.getLogger().critical(msg, *args, **kwargs)
@ -55,49 +58,47 @@ def disable(level=logging.CRITICAL):
def basicConfig(**kwargs):
logging.basicConfig(**kwargs)
def getLogger(name: str=None)->logging.Logger:
def getLogger(name: str = None) -> logging.Logger:
return InvokeAILogger.getLogger(name)
class InvokeAILogFormatter(logging.Formatter):
'''
Repurposed from:
https://stackoverflow.com/questions/14844970/modifying-logging-message-format-based-on-message-logging-level-in-python3
Custom Formatting for the InvokeAI Logger
'''
crit_fmt = "### %(msg)s"
err_fmt = "*** %(msg)s"
warn_fmt = "** %(msg)s"
info_fmt = ">> %(msg)s"
dbg_fmt = " | %(msg)s"
def __init__(self):
super().__init__(fmt="%(levelno)d: %(msg)s", datefmt=None, style='%')
# Color Codes
grey = "\x1b[38;20m"
yellow = "\x1b[33;20m"
red = "\x1b[31;20m"
cyan = "\x1b[36;20m"
bold_red = "\x1b[31;1m"
reset = "\x1b[0m"
# Log Format
format = "[%(asctime)s]::[%(name)s]::%(levelname)s --> %(message)s"
## More Formatting Options: %(pathname)s, %(filename)s, %(module)s, %(lineno)d
# Format Map
FORMATS = {
logging.DEBUG: cyan + format + reset,
logging.INFO: grey + format + reset,
logging.WARNING: yellow + format + reset,
logging.ERROR: red + format + reset,
logging.CRITICAL: bold_red + format + reset
}
def format(self, record):
# Remember the format used when the logging module
# was installed (in the event that this formatter is
# used with the vanilla logging module.
format_orig = self._style._fmt
if record.levelno == logging.DEBUG:
self._style._fmt = InvokeAILogFormatter.dbg_fmt
if record.levelno == logging.INFO:
self._style._fmt = InvokeAILogFormatter.info_fmt
if record.levelno == logging.WARNING:
self._style._fmt = InvokeAILogFormatter.warn_fmt
if record.levelno == logging.ERROR:
self._style._fmt = InvokeAILogFormatter.err_fmt
if record.levelno == logging.CRITICAL:
self._style._fmt = InvokeAILogFormatter.crit_fmt
log_fmt = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt, datefmt="%d-%m-%Y %H:%M:%S")
return formatter.format(record)
# parent class does the work
result = super().format(record)
self._style._fmt = format_orig
return result
class InvokeAILogger(object):
loggers = dict()
@classmethod
def getLogger(self, name:str='invokeai')->logging.Logger:
def getLogger(cls, name: str = 'InvokeAI') -> logging.Logger:
if name not in cls.loggers:
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
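The truncated tail of getLogger presumably attaches a stream handler carrying InvokeAILogFormatter; equivalent manual wiring, as a sketch:

    import logging

    handler = logging.StreamHandler()
    handler.setFormatter(InvokeAILogFormatter())
    logger = logging.getLogger("InvokeAI")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    logger.warning("loading model weights")
    # [18-05-2023 14:31:47]::[InvokeAI]::WARNING --> loading model weights  (in yellow)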

View File

@ -9,6 +9,7 @@ SAMPLER_CHOICES = [
"lms",
"pndm",
"heun",
'heun_k',
"euler",
"euler_k",
"euler_a",