adjust non-app modules to use new config system

Lincoln Stein
2023-05-04 00:43:51 -04:00
parent 15ffb53e59
commit e4196bbe5b
18 changed files with 84 additions and 98 deletions
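The change is mechanical throughout: modules stop importing path helpers from the old globals module (Globals.root, global_config_dir(), global_cache_dir()) and instead read the same values from a shared InvokeAIAppConfig object returned by get_invokeai_config(). A rough before/after sketch of the mapping, inferred from the hunks below (attribute names beyond those visible in the diff are assumptions):

    # Old style, removed in this commit
    from ..globals import Globals, global_cache_dir, global_config_dir
    models_yaml = Path(global_config_dir()) / "models.yaml"
    sd_configs = Path(global_config_dir()) / "stable-diffusion"
    hf_cache = global_cache_dir("hub")
    outputs = Globals.root / "outputs"

    # New style, introduced in this commit
    from invokeai.app.services.config import get_invokeai_config
    config = get_invokeai_config()
    models_yaml = config.model_conf_path
    sd_configs = config.legacy_conf_path
    hf_cache = config.cache_dir
    outputs = config.root / "outputs"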

View File

@@ -44,28 +44,29 @@ from ...frontend.install.widgets import (
IntTitleSlider,
set_min_terminal_size,
)
from ..args import PRECISION_CHOICES, Args
from ..globals import Globals, global_cache_dir, global_config_dir, global_config_file
from .model_install_backend import (
default_dataset,
download_from_hf,
hf_download_with_resume,
recommended_datasets,
)
from invokeai.app.services.config import get_invokeai_config
warnings.filterwarnings("ignore")
transformers.logging.set_verbosity_error()
# --------------------------globals-----------------------
config = get_invokeai_config()
Model_dir = "models"
Weights_dir = "ldm/stable-diffusion-v1/"
# the initial "configs" dir is now bundled in the `invokeai.configs` package
Dataset_path = Path(configs.__path__[0]) / "INITIAL_MODELS.yaml"
Default_config_file = Path(global_config_dir()) / "models.yaml"
SD_Configs = Path(global_config_dir()) / "stable-diffusion"
Default_config_file = config.model_conf_path
SD_Configs = config.legacy_conf_path
Datasets = OmegaConf.load(Dataset_path)
@@ -73,6 +74,8 @@ Datasets = OmegaConf.load(Dataset_path)
MIN_COLS = 135
MIN_LINES = 45
PRECISION_CHOICES = ['auto','float16','float32','autocast']
INIT_FILE_PREAMBLE = """# InvokeAI initialization file
# This is the InvokeAI initialization file, which contains command-line default values.
# Feel free to edit. If anything goes wrong, you can re-initialize this file by deleting
@@ -103,7 +106,7 @@ Command-line interface:
invokeai
If you installed using an installation script, run:
{Globals.root}/invoke.{"bat" if sys.platform == "win32" else "sh"}
{config.root}/invoke.{"bat" if sys.platform == "win32" else "sh"}
Add the '--help' argument to see all of the command-line switches available for use.
"""
@@ -216,11 +219,11 @@ def download_realesrgan():
wdn_model_url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth"
model_dest = os.path.join(
Globals.root, "models/realesrgan/realesr-general-x4v3.pth"
config.root, "models/realesrgan/realesr-general-x4v3.pth"
)
wdn_model_dest = os.path.join(
Globals.root, "models/realesrgan/realesr-general-wdn-x4v3.pth"
config.root, "models/realesrgan/realesr-general-wdn-x4v3.pth"
)
download_with_progress_bar(model_url, model_dest, "RealESRGAN")
@@ -243,7 +246,7 @@ def download_gfpgan():
"./models/gfpgan/weights/parsing_parsenet.pth",
],
):
model_url, model_dest = model[0], os.path.join(Globals.root, model[1])
model_url, model_dest = model[0], os.path.join(config.root, model[1])
download_with_progress_bar(model_url, model_dest, "GFPGAN weights")
@@ -253,7 +256,7 @@ def download_codeformer():
model_url = (
"https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth"
)
model_dest = os.path.join(Globals.root, "models/codeformer/codeformer.pth")
model_dest = os.path.join(config.root, "models/codeformer/codeformer.pth")
download_with_progress_bar(model_url, model_dest, "CodeFormer")
@@ -306,7 +309,7 @@ def download_vaes():
if not hf_download_with_resume(
repo_id=repo_id,
model_name=model_name,
model_dir=str(Globals.root / Model_dir / Weights_dir),
model_dir=str(config.root / Model_dir / Weights_dir),
):
raise Exception(f"download of {model_name} failed")
except Exception as e:
@@ -321,8 +324,7 @@ def get_root(root: str = None) -> str:
elif os.environ.get("INVOKEAI_ROOT"):
return os.environ.get("INVOKEAI_ROOT")
else:
return Globals.root
return config.root
# -------------------------------------
class editOptsForm(npyscreen.FormMultiPage):
@@ -332,7 +334,7 @@ class editOptsForm(npyscreen.FormMultiPage):
def create(self):
program_opts = self.parentApp.program_opts
old_opts = self.parentApp.invokeai_opts
first_time = not (Globals.root / Globals.initfile).exists()
first_time = not (config.root / 'invokeai.init').exists()
access_token = HfFolder.get_token()
window_width, window_height = get_terminal_size()
for i in [
@@ -384,7 +386,7 @@ class editOptsForm(npyscreen.FormMultiPage):
self.safety_checker = self.add_widget_intelligent(
npyscreen.Checkbox,
name="NSFW checker",
value=old_opts.safety_checker,
value=old_opts.nsfw_checker,
relx=5,
scroll_exit=True,
)
@@ -438,14 +440,7 @@ class editOptsForm(npyscreen.FormMultiPage):
self.xformers = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Enable xformers support if available",
value=old_opts.xformers,
relx=5,
scroll_exit=True,
)
self.ckpt_convert = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Load legacy checkpoint models into memory as diffusers models",
value=old_opts.ckpt_convert,
value=old_opts.xformers_enabled,
relx=5,
scroll_exit=True,
)
@@ -583,7 +578,6 @@ class editOptsForm(npyscreen.FormMultiPage):
"xformers",
"always_use_cpu",
"embedding_path",
"ckpt_convert",
]:
setattr(new_opts, attr, getattr(self, attr).value)
@@ -628,15 +622,14 @@ def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Nam
def default_startup_options(init_file: Path) -> Namespace:
opts = Args().parse_args([])
opts = InvokeAIAppConfig(argv=[])
outdir = Path(opts.outdir)
if not outdir.is_absolute():
opts.outdir = str(Globals.root / opts.outdir)
opts.outdir = str(config.root / opts.outdir)
if not init_file.exists():
opts.safety_checker = True
opts.nsfw_checker = True
return opts
def default_user_selections(program_opts: Namespace) -> Namespace:
return Namespace(
starter_models=default_dataset()
@@ -724,7 +717,6 @@ def write_opts(opts: Namespace, init_file: Path):
--max_loaded_models={int(opts.max_loaded_models)}
--{'no-' if not opts.safety_checker else ''}nsfw_checker
--{'no-' if not opts.xformers else ''}xformers
--{'no-' if not opts.ckpt_convert else ''}ckpt_convert
{'--free_gpu_mem' if opts.free_gpu_mem else ''}
{'--always_use_cpu' if opts.always_use_cpu else ''}
"""
@@ -740,13 +732,11 @@ def write_opts(opts: Namespace, init_file: Path):
# -------------------------------------
def default_output_dir() -> Path:
return Globals.root / "outputs"
return config.root / "outputs"
# -------------------------------------
def default_embedding_dir() -> Path:
return Globals.root / "embeddings"
return config.root / "embeddings"
# -------------------------------------
def write_default_options(program_opts: Namespace, initfile: Path):
@@ -810,7 +800,7 @@ def main():
opt = parser.parse_args()
# setting a global here
Globals.root = Path(os.path.expanduser(get_root(opt.root) or ""))
config.root = Path(os.path.expanduser(get_root(opt.root) or ""))
errors = set()
@@ -818,9 +808,10 @@ def main():
models_to_download = default_user_selections(opt)
# We check to see whether the runtime directory is correctly initialized.
init_file = Path(Globals.root, Globals.initfile)
if not init_file.exists() or not global_config_file().exists():
initialize_rootdir(Globals.root, opt.yes_to_all)
print('** The invokeai.init file is no longer supported. Migrate this code to invokeai.yaml **')
init_file = Path(config.root, 'invokeai.init')
if not init_file.exists() or not config.model_conf_path.exists():
initialize_rootdir(config.root, opt.yes_to_all)
if opt.yes_to_all:
write_default_options(opt, init_file)
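Note the option renames folded into this file: safety_checker becomes nsfw_checker, xformers becomes xformers_enabled, and ckpt_convert disappears entirely. A minimal sketch of the new default_startup_options() flow under those renames, assuming InvokeAIAppConfig accepts an argv list as shown in the hunk above:

    # Parse defaults only; an empty argv ignores the real command line.
    opts = InvokeAIAppConfig(argv=[])
    if not Path(opts.outdir).is_absolute():
        opts.outdir = str(config.root / opts.outdir)  # anchor a relative outdir at the root
    if not init_file.exists():
        opts.nsfw_checker = True  # renamed from safety_checker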

View File

@@ -19,13 +19,15 @@ from tqdm import tqdm
import invokeai.configs as configs
from ..globals import Globals, global_cache_dir, global_config_dir
from invokeai.app.services.config import get_invokeai_config
from ..model_management import ModelManager
from ..stable_diffusion import StableDiffusionGeneratorPipeline
warnings.filterwarnings("ignore")
# --------------------------globals-----------------------
config = get_invokeai_config()
Model_dir = "models"
Weights_dir = "ldm/stable-diffusion-v1/"
@@ -47,12 +49,11 @@ Config_preamble = """
def default_config_file():
return Path(global_config_dir()) / "models.yaml"
return config.model_conf_path
def sd_configs():
return Path(global_config_dir()) / "stable-diffusion"
return config.legacy_conf_path
def initial_models():
global Datasets
@@ -121,8 +122,9 @@ def install_requested_models(
if scan_at_startup and scan_directory.is_dir():
argument = "--autoconvert"
initfile = Path(Globals.root, Globals.initfile)
replacement = Path(Globals.root, f"{Globals.initfile}.new")
print('** The global initfile is no longer supported; rewrite to support new yaml format **')
initfile = Path(config.root, 'invokeai.init')
replacement = Path(config.root, "invokeai.init.new")
directory = str(scan_directory).replace("\\", "/")
with open(initfile, "r") as input:
with open(replacement, "w") as output:
@@ -150,7 +152,7 @@ def get_root(root: str = None) -> str:
elif os.environ.get("INVOKEAI_ROOT"):
return os.environ.get("INVOKEAI_ROOT")
else:
return Globals.root
return config.root
# ---------------------------------------------
@@ -183,7 +185,7 @@ def all_datasets() -> dict:
# look for legacy model.ckpt in models directory and offer to
# normalize its name
def migrate_models_ckpt():
model_path = os.path.join(Globals.root, Model_dir, Weights_dir)
model_path = os.path.join(config.root, Model_dir, Weights_dir)
if not os.path.exists(os.path.join(model_path, "model.ckpt")):
return
new_name = initial_models()["stable-diffusion-1.4"]["file"]
@@ -228,7 +230,7 @@ def _download_repo_or_file(
def _download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path:
repo_id = mconfig["repo_id"]
filename = mconfig["file"]
cache_dir = os.path.join(Globals.root, Model_dir, Weights_dir)
cache_dir = os.path.join(config.root, Model_dir, Weights_dir)
return hf_download_with_resume(
repo_id=repo_id,
model_dir=cache_dir,
@@ -239,9 +241,9 @@ def _download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path:
# ---------------------------------------------
def download_from_hf(
model_class: object, model_name: str, cache_subdir: Path = Path("hub"), **kwargs
model_class: object, model_name: str, **kwargs
):
path = global_cache_dir(cache_subdir)
path = config.cache_dir
model = model_class.from_pretrained(
model_name,
cache_dir=path,
@@ -417,7 +419,7 @@ def new_config_file_contents(
stanza["height"] = mod["height"]
if "file" in mod:
stanza["weights"] = os.path.relpath(
successfully_downloaded[model], start=Globals.root
successfully_downloaded[model], start=config.root
)
stanza["config"] = os.path.normpath(
os.path.join(sd_configs(), mod["config"])
@@ -456,7 +458,7 @@ def delete_weights(model_name: str, conf_stanza: dict):
weights = Path(weights)
if not weights.is_absolute():
weights = Path(Globals.root) / weights
weights = Path(config.root) / weights
try:
weights.unlink()
except OSError as e:
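The download_from_hf() change above is the one signature break in this file: callers can no longer route a download into a per-call cache_subdir, because the cache location now comes from config.cache_dir. A usage sketch under that assumption (the model name here is only an example):

    # All Hugging Face downloads now land in the single configured cache.
    from transformers import CLIPTokenizer
    tokenizer = download_from_hf(CLIPTokenizer, "openai/clip-vit-large-patch14")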

View File

@@ -6,9 +6,9 @@ be suppressed or deferred
"""
import numpy as np
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.config import get_invokeai_config
config = InvokeAIAppConfig()
config = get_invokeai_config()
class PatchMatch:
"""

View File

@@ -33,11 +33,11 @@ from PIL import Image, ImageOps
from transformers import AutoProcessor, CLIPSegForImageSegmentation
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.config import get_invokeai_config
CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
CLIPSEG_SIZE = 352
config = InvokeAIAppConfig()
config = get_invokeai_config()
class SegmentedGrayscale(object):
def __init__(self, image: Image, heatmap: torch.Tensor):

View File

@@ -26,7 +26,7 @@ import torch
from safetensors.torch import load_file
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.config import get_invokeai_config
from .model_manager import ModelManager, SDLegacyType
@@ -73,7 +73,7 @@ from transformers import (
from ..stable_diffusion import StableDiffusionGeneratorPipeline
config = InvokeAIAppConfig()
config = get_invokeai_config()
def shave_segments(path, n_shave_prefix_segments=1):
"""

View File

@@ -47,7 +47,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import (
from ..stable_diffusion import (
StableDiffusionGeneratorPipeline,
)
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.config import get_invokeai_config
from ..util import CUDA_DEVICE, ask_user, download_with_resume
class SDLegacyType(Enum):
@@ -68,7 +68,7 @@ class SDModelComponent(Enum):
feature_extractor="feature_extractor"
DEFAULT_MAX_MODELS = 2
config = InvokeAIAppConfig()
config = get_invokeai_config()
class ModelManager(object):
"""

View File

@@ -20,11 +20,11 @@ from compel.prompt_parser import (
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.config import get_invokeai_config
from ..stable_diffusion import InvokeAIDiffuserComponent
from ..util import torch_dtype
config = InvokeAIAppConfig()
config = get_invokeai_config()
def get_uc_and_c_and_ec(
prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False

View File

@@ -6,8 +6,8 @@ import numpy as np
import torch
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
config = InvokeAIAppConfig()
from invokeai.app.services.config import get_invokeai_config
config = get_invokeai_config()
pretrained_model_url = (
"https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth"

View File

@@ -7,8 +7,8 @@ import torch
from PIL import Image
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
config = InvokeAIAppConfig()
from invokeai.app.services.config import get_invokeai_config
config = get_invokeai_config()
class GFPGAN:
def __init__(self, gfpgan_model_path="models/gfpgan/GFPGANv1.4.pth") -> None:

View File

@@ -15,10 +15,10 @@ from transformers import AutoFeatureExtractor
import invokeai.assets.web as web_assets
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.config import get_invokeai_config
from .util import CPU_DEVICE
config = InvokeAIAppConfig()
config = get_invokeai_config()
class SafetyChecker(object):
CAUTION_IMG = "caution.png"

View File

@@ -18,8 +18,8 @@ from huggingface_hub import (
)
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
config = InvokeAIAppConfig()
from invokeai.app.services.config import get_invokeai_config
config = get_invokeai_config()
class HuggingFaceConceptsLibrary(object):
def __init__(self, root=None):

View File

@@ -33,7 +33,7 @@ from torchvision.transforms.functional import resize as tv_resize
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
from typing_extensions import ParamSpec
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.config import get_invokeai_config
from ..util import CPU_DEVICE, normalize_device
from .diffusion import (
AttentionMapSaver,
@@ -43,7 +43,7 @@ from .diffusion import (
from .offloading import FullyLoadedModelGroup, LazilyLoadedModelGroup, ModelGroup
from .textual_inversion_manager import TextualInversionManager
config = InvokeAIAppConfig()
config = get_invokeai_config()
@dataclass
class PipelineIntermediateState:

View File

@@ -9,7 +9,7 @@ from diffusers.models.attention_processor import AttentionProcessor
from typing_extensions import TypeAlias
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.config import get_invokeai_config
from .cross_attention_control import (
Arguments,
@@ -31,7 +31,7 @@ ModelForwardCallback: TypeAlias = Union[
Callable[[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor],
]
config = InvokeAIAppConfig()
config = get_invokeai_config()
@dataclass(frozen=True)
class PostprocessingSettings:

View File

@@ -4,12 +4,12 @@ from contextlib import nullcontext
import torch
from torch import autocast
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.app.services.config import get_invokeai_config
CPU_DEVICE = torch.device("cpu")
CUDA_DEVICE = torch.device("cuda")
MPS_DEVICE = torch.device("mps")
config = InvokeAIAppConfig()
config = get_invokeai_config()
def choose_torch_device() -> torch.device:
"""Convenience routine for guessing which GPU device to run model on"""