diff --git a/.github/workflows/test-invoke-pip.yml b/.github/workflows/test-invoke-pip.yml
index 785358b30a..21fda2d191 100644
--- a/.github/workflows/test-invoke-pip.yml
+++ b/.github/workflows/test-invoke-pip.yml
@@ -80,12 +80,7 @@ jobs:
         uses: actions/checkout@v3
 
       - name: set test prompt to main branch validation
-        if: ${{ github.ref == 'refs/heads/main' }}
-        run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
-
-      - name: set test prompt to Pull Request validation
-        if: ${{ github.ref != 'refs/heads/main' }}
-        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
+        run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
 
       - name: setup python
         uses: actions/setup-python@v4
@@ -131,7 +126,7 @@ jobs:
          --precision=float32
          --always_use_cpu
          --outdir ${{ env.INVOKEAI_OUTDIR }}/${{ matrix.python-version }}/${{ matrix.pytorch }}
-         < ${{ env.TEST_PROMPTS }}
+         --from_file ${{ env.TEST_PROMPTS }}
 
       - name: Archive results
         id: archive-results
diff --git a/invokeai/app/cli_app.py b/invokeai/app/cli_app.py
index 6b3ff4d2e1..9f2705d800 100644
--- a/invokeai/app/cli_app.py
+++ b/invokeai/app/cli_app.py
@@ -4,6 +4,7 @@ import argparse
 import os
 import re
 import shlex
+import sys
 import time
 from typing import (
     Union,
@@ -195,6 +196,11 @@ def invoke_cli():
     parser = config.get_parser()
     parser.add_argument('commands',nargs='*')
     invocation_commands = parser.parse_args().commands
+
+    # get the optional file to read commands from.
+    # Simplest is to use it for STDIN
+    if infile := config.from_file:
+        sys.stdin = open(infile,"r")
 
     model_manager = get_model_manager(config,logger=logger)
diff --git a/invokeai/app/services/config.py b/invokeai/app/services/config.py
index e9f94a5fb9..2d87125744 100644
--- a/invokeai/app/services/config.py
+++ b/invokeai/app/services/config.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2023 Lincoln Stein (https://github.com/lstein)
+# Copyright (c) 2023 Lincoln Stein (https://github.com/lstein) and the InvokeAI Development Team
 
 '''Invokeai configuration system.
 
@@ -206,8 +206,16 @@ class InvokeAISettings(BaseSettings):
             if cls.initconf and settings_stanza in cls.initconf \
             else OmegaConf.create()
 
+        # create an upcase version of the environment in
+        # order to achieve case-insensitive environment
+        # variables (the way Windows does)
+        upcase_environ = dict()
+        for key,value in os.environ.items():
+            upcase_environ[key.upper()] = value
+
         fields = cls.__fields__
         cls.argparse_groups = {}
+
         for name, field in fields.items():
             if name not in cls._excluded():
                 current_default = field.default
@@ -216,8 +224,8 @@ class InvokeAISettings(BaseSettings):
                 env_name = env_prefix + '_' + name
                 if category in initconf and name in initconf.get(category):
                     field.default = initconf.get(category).get(name)
-                if env_name in os.environ:
-                    field.default = os.environ[env_name]
+                if env_name.upper() in upcase_environ:
+                    field.default = upcase_environ[env_name.upper()]
                 cls.add_field_argument(parser, name, field)
                 field.default = current_default
@@ -353,6 +361,7 @@ setting environment variables INVOKEAI_.
     legacy_conf_dir : Path = Field(default='configs/stable-diffusion', description='Path to directory of legacy checkpoint config files', category='Paths')
     lora_dir : Path = Field(default='loras', description='Path to InvokeAI LoRA model directory', category='Paths')
     outdir : Path = Field(default='outputs', description='Default folder for output images', category='Paths')
+    from_file : Path = Field(default=None, description='Take command input from the indicated file (command-line client only)', category='Paths')
     model : str = Field(default='stable-diffusion-1.5', description='Initial model name', category='Models')
     embeddings : bool = Field(default=True, description='Load contents of embeddings directory', category='Models')
@@ -502,11 +511,11 @@ class PagingArgumentParser(argparse.ArgumentParser):
         text = self.format_help()
         pydoc.pager(text)
 
-def get_invokeai_config(cls:Type[InvokeAISettings]=InvokeAIAppConfig)->InvokeAISettings:
+def get_invokeai_config(cls:Type[InvokeAISettings]=InvokeAIAppConfig,**kwargs)->InvokeAISettings:
     '''
     This returns a singleton InvokeAIAppConfig configuration object.
     '''
     global global_config
     if global_config is None or type(global_config)!=cls:
-        global_config = cls()
+        global_config = cls(**kwargs)
     return global_config
diff --git a/invokeai/backend/config/invokeai_configure.py b/invokeai/backend/config/invokeai_configure.py
index 4d35ab251c..59f11d35bc 100755
--- a/invokeai/backend/config/invokeai_configure.py
+++ b/invokeai/backend/config/invokeai_configure.py
@@ -389,8 +389,8 @@ class editOptsForm(npyscreen.FormMultiPage):
         )
         self.nextrely += 1
         for i in [
-            "If you have an account at HuggingFace you may paste your access token here",
-            'to allow InvokeAI to download styles & subjects from the "Concept Library".',
+            "If you have an account at HuggingFace you may optionally paste your access token here",
+            'to allow InvokeAI to download restricted styles & subjects from the "Concept Library".',
             "See https://huggingface.co/settings/tokens",
         ]:
             self.add_widget_intelligent(
@@ -593,6 +593,9 @@ class editOptsForm(npyscreen.FormMultiPage):
         new_opts.hf_token = self.hf_token.value
         new_opts.license_acceptance = self.license_acceptance.value
         new_opts.precision = PRECISION_CHOICES[self.precision.value[0]]
+
+        # widget library workaround to make max_loaded_models an int rather than a float
+        new_opts.max_loaded_models = int(new_opts.max_loaded_models)
 
         return new_opts
diff --git a/invokeai/backend/image_util/patchmatch.py b/invokeai/backend/image_util/patchmatch.py
index dcd2effb19..0d2221be41 100644
--- a/invokeai/backend/image_util/patchmatch.py
+++ b/invokeai/backend/image_util/patchmatch.py
@@ -8,8 +8,6 @@ import numpy as np
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import get_invokeai_config
 
-config = get_invokeai_config()
-
 class PatchMatch:
     """
     Thin class wrapper around the patchmatch function.
@@ -23,6 +21,7 @@ class PatchMatch:
 
     @classmethod
     def _load_patch_match(self):
+        config = get_invokeai_config()
         if self.tried_load:
             return
         if config.try_patchmatch:
diff --git a/invokeai/backend/image_util/txt2mask.py b/invokeai/backend/image_util/txt2mask.py
index 3e39bd677a..1a8fcfeb90 100644
--- a/invokeai/backend/image_util/txt2mask.py
+++ b/invokeai/backend/image_util/txt2mask.py
@@ -37,7 +37,6 @@ from invokeai.app.services.config import get_invokeai_config
 
 CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
 CLIPSEG_SIZE = 352
-config = get_invokeai_config()
 
 class SegmentedGrayscale(object):
     def __init__(self, image: Image, heatmap: torch.Tensor):
@@ -84,6 +83,7 @@ class Txt2Mask(object):
 
     def __init__(self, device="cpu", refined=False):
         logger.info("Initializing clipseg model for text to mask inference")
+        config = get_invokeai_config()
 
         # BUG: we are not doing anything with the device option at this time
         self.device = device
diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
index 4e2ee56220..467fe39155 100644
--- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
+++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py
@@ -74,8 +74,6 @@ from transformers import (
 
 from ..stable_diffusion import StableDiffusionGeneratorPipeline
 
-config = get_invokeai_config()
-
 def shave_segments(path, n_shave_prefix_segments=1):
     """
     Removes segments. Positive values shave the first segments, negative shave the last segments.
@@ -844,7 +842,7 @@ def convert_ldm_bert_checkpoint(checkpoint, config):
 
 def convert_ldm_clip_checkpoint(checkpoint):
     text_model = CLIPTextModel.from_pretrained(
-        "openai/clip-vit-large-patch14", cache_dir=config.cache_dir
+        "openai/clip-vit-large-patch14", cache_dir=get_invokeai_config().cache_dir
     )
 
     keys = list(checkpoint.keys())
@@ -899,7 +897,7 @@ textenc_pattern = re.compile("|".join(protected.keys()))
 
 def convert_paint_by_example_checkpoint(checkpoint):
-    cache_dir = config.cache_dir
+    cache_dir = get_invokeai_config().cache_dir
     config = CLIPVisionConfig.from_pretrained(
         "openai/clip-vit-large-patch14", cache_dir=cache_dir
     )
@@ -971,7 +969,7 @@ def convert_paint_by_example_checkpoint(checkpoint):
 
 def convert_open_clip_checkpoint(checkpoint):
-    cache_dir = config.cache_dir
+    cache_dir = get_invokeai_config().cache_dir
     text_model = CLIPTextModel.from_pretrained(
         "stabilityai/stable-diffusion-2", subfolder="text_encoder", cache_dir=cache_dir
     )
@@ -1094,7 +1092,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     :param vae: A diffusers VAE to load into the pipeline.
     :param vae_path: Path to a checkpoint VAE that will be converted into diffusers and loaded into the pipeline.
""" - + config = get_invokeai_config() with warnings.catch_warnings(): warnings.simplefilter("ignore") verbosity = dlogging.get_verbosity() diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 7f70064c8d..bdbca195bd 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -68,7 +68,6 @@ class SDModelComponent(Enum): feature_extractor="feature_extractor" DEFAULT_MAX_MODELS = 2 -config = get_invokeai_config() class ModelManager(object): """ @@ -99,6 +98,7 @@ class ModelManager(object): if not isinstance(config, DictConfig): config = OmegaConf.load(config) self.config = config + self.globals = get_invokeai_config() self.precision = precision self.device = torch.device(device_type) self.max_loaded_models = max_loaded_models @@ -291,7 +291,7 @@ class ModelManager(object): """ # if we are converting legacy files automatically, then # there are no legacy ckpts! - if config.ckpt_convert: + if self.globals.ckpt_convert: return False info = self.model_info(model_name) if "weights" in info and info["weights"].endswith((".ckpt", ".safetensors")): @@ -501,13 +501,13 @@ class ModelManager(object): # TODO: scan weights maybe? pipeline_args: dict[str, Any] = dict( - safety_checker=None, local_files_only=not config.internet_available + safety_checker=None, local_files_only=not self.globals.internet_available ) if "vae" in mconfig and mconfig["vae"] is not None: if vae := self._load_vae(mconfig["vae"]): pipeline_args.update(vae=vae) if not isinstance(name_or_path, Path): - pipeline_args.update(cache_dir=config.cache_dir) + pipeline_args.update(cache_dir=self.globals.cache_dir) if using_fp16: pipeline_args.update(torch_dtype=torch.float16) fp_args_list = [{"revision": "fp16"}, {}] @@ -559,10 +559,9 @@ class ModelManager(object): width = mconfig.width height = mconfig.height - if not os.path.isabs(config): - config = os.path.join(config.root, config) - if not os.path.isabs(weights): - weights = os.path.normpath(os.path.join(config.root, weights)) + root_dir = self.globals.root_dir + config = str(root_dir / config) + weights = str(root_dir / weights) # Convert to diffusers and return a diffusers pipeline self.logger.info(f"Converting legacy checkpoint {model_name} into a diffusers model...") @@ -577,11 +576,7 @@ class ModelManager(object): vae_path = None if vae: - vae_path = ( - vae - if os.path.isabs(vae) - else os.path.normpath(os.path.join(config.root, vae)) - ) + vae_path = str(root_dir / vae) if self._has_cuda(): torch.cuda.empty_cache() pipeline = load_pipeline_from_original_stable_diffusion_ckpt( @@ -613,9 +608,7 @@ class ModelManager(object): ) if "path" in mconfig and mconfig["path"] is not None: - path = Path(mconfig["path"]) - if not path.is_absolute(): - path = Path(config.root, path).resolve() + path = self.globals.root_dir / Path(mconfig["path"]) return path elif "repo_id" in mconfig: return mconfig["repo_id"] @@ -863,16 +856,16 @@ class ModelManager(object): model_type = self.probe_model_type(checkpoint) if model_type == SDLegacyType.V1: self.logger.debug("SD-v1 model detected") - model_config_file = config.legacy_conf_path / "v1-inference.yaml" + model_config_file = self.globals.legacy_conf_path / "v1-inference.yaml" elif model_type == SDLegacyType.V1_INPAINT: self.logger.debug("SD-v1 inpainting model detected") - model_config_file = config.legacy_conf_path / "v1-inpainting-inference.yaml", + model_config_file = self.globals.legacy_conf_path / 
"v1-inpainting-inference.yaml", elif model_type == SDLegacyType.V2_v: self.logger.debug("SD-v2-v model detected") - model_config_file = config.legacy_conf_path / "v2-inference-v.yaml" + model_config_file = self.globals.legacy_conf_path / "v2-inference-v.yaml" elif model_type == SDLegacyType.V2_e: self.logger.debug("SD-v2-e model detected") - model_config_file = config.legacy_conf_path / "v2-inference.yaml" + model_config_file = self.globals.legacy_conf_path / "v2-inference.yaml" elif model_type == SDLegacyType.V2: self.logger.warning( f"{thing} is a V2 checkpoint file, but its parameterization cannot be determined. Please provide configuration file path." @@ -899,7 +892,7 @@ class ModelManager(object): self.logger.debug(f"Using VAE file {vae_path.name}") vae = None if vae_path else dict(repo_id="stabilityai/sd-vae-ft-mse") - diffuser_path = config.root / "models/converted_ckpts" / model_path.stem + diffuser_path = self.globals.root_dir / "models/converted_ckpts" / model_path.stem model_name = self.convert_and_import( model_path, diffusers_path=diffuser_path, @@ -1032,7 +1025,7 @@ class ModelManager(object): """ yaml_str = OmegaConf.to_yaml(self.config) if not os.path.isabs(config_file_path): - config_file_path = config.model_conf_path + config_file_path = self.globals.model_conf_path tmpfile = os.path.join(os.path.dirname(config_file_path), "new_config.tmp") with open(tmpfile, "w", encoding="utf-8") as outfile: outfile.write(self.preamble()) @@ -1064,7 +1057,8 @@ class ModelManager(object): """ # Three transformer models to check: bert, clip and safety checker, and # the diffusers as well - models_dir = config.root / "models" + config = get_invokeai_config() + models_dir = config.root_dir / "models" legacy_locations = [ Path( models_dir, @@ -1138,13 +1132,12 @@ class ModelManager(object): if str(source).startswith(("http:", "https:", "ftp:")): dest_directory = Path(dest_directory) if not dest_directory.is_absolute(): - dest_directory = config.root / dest_directory + dest_directory = self.globals.root_dir / dest_directory dest_directory.mkdir(parents=True, exist_ok=True) resolved_path = download_with_resume(str(source), dest_directory) else: - if not os.path.isabs(source): - source = config.root / source - resolved_path = Path(source) + source = self.globals.root_dir / source + resolved_path = source return resolved_path def _invalidate_cached_model(self, model_name: str) -> None: @@ -1194,7 +1187,7 @@ class ModelManager(object): path = name_or_path else: owner, repo = name_or_path.split("/") - path = Path(config.cache_dir / f"models--{owner}--{repo}") + path = self.globals.cache_dir / f"models--{owner}--{repo}" if not path.exists(): return None hashpath = path / "checksum.sha256" @@ -1255,8 +1248,8 @@ class ModelManager(object): using_fp16 = self.precision == "float16" vae_args.update( - cache_dir=config.cache_dir, - local_files_only=not config.internet_available, + cache_dir=self.globals.cache_dir, + local_files_only=not self.globals.internet_available, ) self.logger.debug(f"Loading diffusers VAE from {name_or_path}") @@ -1294,7 +1287,7 @@ class ModelManager(object): @classmethod def _delete_model_from_cache(cls,repo_id): - cache_info = scan_cache_dir(config.cache_dir) + cache_info = scan_cache_dir(get_invokeai_config().cache_dir) # I'm sure there is a way to do this with comprehensions # but the code quickly became incomprehensible! 
@@ -1311,9 +1304,10 @@ class ModelManager(object):
 
     @staticmethod
     def _abs_path(path: str | Path) -> Path:
+        globals = get_invokeai_config()
         if path is None or Path(path).is_absolute():
             return path
-        return Path(config.root, path).resolve()
+        return Path(globals.root_dir, path).resolve()
 
     @staticmethod
     def _is_huggingface_hub_directory_present() -> bool:
diff --git a/invokeai/backend/prompting/conditioning.py b/invokeai/backend/prompting/conditioning.py
index fe2a553015..2e62853872 100644
--- a/invokeai/backend/prompting/conditioning.py
+++ b/invokeai/backend/prompting/conditioning.py
@@ -25,8 +25,6 @@ from invokeai.app.services.config import get_invokeai_config
 from ..stable_diffusion import InvokeAIDiffuserComponent
 from ..util import torch_dtype
 
-config = get_invokeai_config()
-
 def get_uc_and_c_and_ec(prompt_string,
                         model: InvokeAIDiffuserComponent,
                         log_tokens=False, skip_normalize_legacy_blend=False):
@@ -40,6 +38,8 @@ def get_uc_and_c_and_ec(prompt_string,
         dtype_for_device_getter=torch_dtype,
         truncate_long_prompts=False,
     )
+
+    config = get_invokeai_config()
 
     # get rid of any newline characters
     prompt_string = prompt_string.replace("\n", " ")
diff --git a/invokeai/backend/restoration/codeformer.py b/invokeai/backend/restoration/codeformer.py
index d6dd16e47c..b7073f8f8b 100644
--- a/invokeai/backend/restoration/codeformer.py
+++ b/invokeai/backend/restoration/codeformer.py
@@ -7,7 +7,6 @@ import torch
 
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import get_invokeai_config
-config = get_invokeai_config()
 
 pretrained_model_url = (
     "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth"
@@ -18,11 +17,11 @@ class CodeFormerRestoration:
     def __init__(
         self, codeformer_dir="models/codeformer", codeformer_model_path="codeformer.pth"
     ) -> None:
-        if not os.path.isabs(codeformer_dir):
-            codeformer_dir = os.path.join(config.root, codeformer_dir)
-        self.model_path = os.path.join(codeformer_dir, codeformer_model_path)
-        self.codeformer_model_exists = os.path.isfile(self.model_path)
+        self.globals = get_invokeai_config()
+        codeformer_dir = self.globals.root_dir / codeformer_dir
+        self.model_path = codeformer_dir / codeformer_model_path
+        self.codeformer_model_exists = self.model_path.exists()
 
         if not self.codeformer_model_exists:
             logger.error("NOT FOUND: CodeFormer model not found at " + self.model_path)
@@ -72,9 +71,7 @@ class CodeFormerRestoration:
                 upscale_factor=1,
                 use_parse=True,
                 device=device,
-                model_rootpath=os.path.join(
-                    config.root, "models", "gfpgan", "weights"
-                ),
+                model_rootpath = self.globals.root_dir / "gfpgan" / "weights"
             )
             face_helper.clean_all()
             face_helper.read_image(bgr_image_array)
diff --git a/invokeai/backend/restoration/gfpgan.py b/invokeai/backend/restoration/gfpgan.py
index 72db199878..063feaa89a 100644
--- a/invokeai/backend/restoration/gfpgan.py
+++ b/invokeai/backend/restoration/gfpgan.py
@@ -8,14 +8,12 @@ from PIL import Image
 
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import get_invokeai_config
-config = get_invokeai_config()
 
 class GFPGAN:
     def __init__(self, gfpgan_model_path="models/gfpgan/GFPGANv1.4.pth") -> None:
+        self.globals = get_invokeai_config()
         if not os.path.isabs(gfpgan_model_path):
-            gfpgan_model_path = os.path.abspath(
-                os.path.join(config.root, gfpgan_model_path)
-            )
+            gfpgan_model_path = self.globals.root_dir / gfpgan_model_path
         self.model_path = gfpgan_model_path
         self.gfpgan_model_exists = os.path.isfile(self.model_path)
 
@@ -34,7 +32,7 @@ class GFPGAN:
             warnings.filterwarnings("ignore", category=DeprecationWarning)
             warnings.filterwarnings("ignore", category=UserWarning)
             cwd = os.getcwd()
-            os.chdir(os.path.join(config.root, "models"))
+            os.chdir(self.globals.root_dir / 'models')
             try:
                 from gfpgan import GFPGANer
diff --git a/invokeai/backend/restoration/realesrgan.py b/invokeai/backend/restoration/realesrgan.py
index 9f26cc63ac..c6c6d2d3b4 100644
--- a/invokeai/backend/restoration/realesrgan.py
+++ b/invokeai/backend/restoration/realesrgan.py
@@ -1,4 +1,3 @@
-import os
 import warnings
 
 import numpy as np
@@ -7,7 +6,8 @@ from PIL import Image
 from PIL.Image import Image as ImageType
 
 import invokeai.backend.util.logging as logger
-from invokeai.backend.globals import Globals
+from invokeai.app.services.config import get_invokeai_config
+config = get_invokeai_config()
 
 class ESRGAN:
     def __init__(self, bg_tile_size=400) -> None:
@@ -30,12 +30,8 @@ class ESRGAN:
             upscale=4,
             act_type="prelu",
         )
-        model_path = os.path.join(
-            Globals.root, "models/realesrgan/realesr-general-x4v3.pth"
-        )
-        wdn_model_path = os.path.join(
-            Globals.root, "models/realesrgan/realesr-general-wdn-x4v3.pth"
-        )
+        model_path = config.root_dir / "models/realesrgan/realesr-general-x4v3.pth"
+        wdn_model_path = config.root_dir / "models/realesrgan/realesr-general-wdn-x4v3.pth"
         scale = 4
 
         bg_upsampler = RealESRGANer(
diff --git a/invokeai/backend/safety_checker.py b/invokeai/backend/safety_checker.py
index a7a0a72f1c..55e8eb1987 100644
--- a/invokeai/backend/safety_checker.py
+++ b/invokeai/backend/safety_checker.py
@@ -18,8 +18,6 @@ import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import get_invokeai_config
 from .util import CPU_DEVICE
 
-config = get_invokeai_config()
-
 class SafetyChecker(object):
     CAUTION_IMG = "caution.png"
 
@@ -28,7 +26,8 @@ class SafetyChecker(object):
         caution = Image.open(path)
         self.caution_img = caution.resize((caution.width // 2, caution.height // 2))
         self.device = device
-
+        config = get_invokeai_config()
+
         try:
             safety_model_id = "CompVis/stable-diffusion-safety-checker"
             safety_model_path = config.cache_dir
diff --git a/invokeai/backend/stable_diffusion/concepts_lib.py b/invokeai/backend/stable_diffusion/concepts_lib.py
index be897a9a24..beb884b012 100644
--- a/invokeai/backend/stable_diffusion/concepts_lib.py
+++ b/invokeai/backend/stable_diffusion/concepts_lib.py
@@ -19,14 +19,14 @@ from huggingface_hub import (
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import get_invokeai_config
 
-config = get_invokeai_config()
 
 class HuggingFaceConceptsLibrary(object):
     def __init__(self, root=None):
         """
         Initialize the Concepts object. May optionally pass a root directory.
""" - self.root = root or config.root + self.config = get_invokeai_config() + self.root = root or self.config.root self.hf_api = HfApi() self.local_concepts = dict() self.concept_list = None @@ -58,7 +58,7 @@ class HuggingFaceConceptsLibrary(object): self.concept_list.extend(list(local_concepts_to_add)) return self.concept_list return self.concept_list - elif config.internet_available is True: + elif self.config.internet_available is True: try: models = self.hf_api.list_models( filter=ModelFilter(model_name="sd-concepts-library/") diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index b9fc5946c0..4ca2a5cb30 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -43,8 +43,6 @@ from .diffusion import ( from .offloading import FullyLoadedModelGroup, LazilyLoadedModelGroup, ModelGroup from .textual_inversion_manager import TextualInversionManager -config = get_invokeai_config() - @dataclass class PipelineIntermediateState: run_id: str @@ -348,6 +346,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): """ if xformers is available, use it, otherwise use sliced attention. """ + config = get_invokeai_config() if ( torch.cuda.is_available() and is_xformers_available() diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index 7970bc8691..4131837b41 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -32,8 +32,6 @@ ModelForwardCallback: TypeAlias = Union[ Callable[[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor], ] -config = get_invokeai_config() - @dataclass(frozen=True) class PostprocessingSettings: threshold: float @@ -74,6 +72,7 @@ class InvokeAIDiffuserComponent: :param model: the unet model to pass through to cross attention control :param model_forward_callback: a lambda with arguments (x, sigma, conditioning_to_apply). will be called repeatedly. 
         """
+        config = get_invokeai_config()
         self.conditioning = None
         self.model = model
         self.is_running_diffusers = is_running_diffusers
diff --git a/invokeai/backend/training/textual_inversion_training.py b/invokeai/backend/training/textual_inversion_training.py
index d68a2e34c9..8c27a6e718 100644
--- a/invokeai/backend/training/textual_inversion_training.py
+++ b/invokeai/backend/training/textual_inversion_training.py
@@ -7,7 +7,6 @@ This is the backend to "textual_inversion.py"
 
 """
 
-import argparse
 import logging
 import math
 import os
@@ -47,7 +46,7 @@ from tqdm.auto import tqdm
 from transformers import CLIPTextModel, CLIPTokenizer
 
 # invokeai stuff
-from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.app.services.config import InvokeAIAppConfig,PagingArgumentParser
 
 if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
     PIL_INTERPOLATION = {
@@ -89,10 +88,9 @@ def save_progress(
 
 def parse_args():
-    config = InvokeAIAppConfig()
-
+    config = InvokeAIAppConfig(argv=[])
     parser = PagingArgumentParser(
-        description="Textual inversion training", formatter_class=ArgFormatter
+        description="Textual inversion training"
     )
     general_group = parser.add_argument_group("General")
     model_group = parser.add_argument_group("Models and Paths")
@@ -529,6 +527,7 @@ def get_full_repo_name(
 
 def do_textual_inversion_training(
+    config: InvokeAIAppConfig,
     model: str,
     train_data_dir: Path,
     output_dir: Path,
@@ -629,7 +628,7 @@ def do_textual_inversion_training(
     elif output_dir is not None:
         os.makedirs(output_dir, exist_ok=True)
 
-    models_conf = OmegaConf.load(os.path.join(config.root, "configs/models.yaml"))
+    models_conf = OmegaConf.load(config.model_conf_path)
     model_conf = models_conf.get(model, None)
     assert model_conf is not None, f"Unknown model: {model}"
     assert (
@@ -641,7 +640,7 @@ def do_textual_inversion_training(
     assert (
         pretrained_model_name_or_path
     ), f"models.yaml error: neither 'repo_id' nor 'path' is defined for {model}"
-    pipeline_args = dict(cache_dir=config.cache_dir())
+    pipeline_args = dict(cache_dir=config.cache_dir)
 
     # Load tokenizer
     if tokenizer_name:
diff --git a/invokeai/backend/util/devices.py b/invokeai/backend/util/devices.py
index ca712149ea..c6c0819df8 100644
--- a/invokeai/backend/util/devices.py
+++ b/invokeai/backend/util/devices.py
@@ -9,10 +9,10 @@ from invokeai.app.services.config import get_invokeai_config
 CPU_DEVICE = torch.device("cpu")
 CUDA_DEVICE = torch.device("cuda")
 MPS_DEVICE = torch.device("mps")
-config = get_invokeai_config()
 
 def choose_torch_device() -> torch.device:
     """Convenience routine for guessing which GPU device to run model on"""
+    config = get_invokeai_config()
     if config.always_use_cpu:
         return CPU_DEVICE
     if torch.cuda.is_available():
@@ -32,6 +32,7 @@ def choose_precision(device: torch.device) -> str:
 
 def torch_dtype(device: torch.device) -> torch.dtype:
+    config = get_invokeai_config()
     if config.full_precision:
         return torch.float32
     if choose_precision(device) == "float16":
diff --git a/invokeai/frontend/training/textual_inversion.py b/invokeai/frontend/training/textual_inversion.py
index 23134d2736..90e402f48b 100755
--- a/invokeai/frontend/training/textual_inversion.py
+++ b/invokeai/frontend/training/textual_inversion.py
@@ -21,14 +21,17 @@ from npyscreen import widget
 from omegaconf import OmegaConf
 
 import invokeai.backend.util.logging as logger
-from invokeai.backend.globals import Globals, global_set_root
-from ...backend.training import do_textual_inversion_training, parse_args
+from invokeai.app.services.config import get_invokeai_config
+from ...backend.training import (
+    do_textual_inversion_training,
+    parse_args
+)
 
 TRAINING_DATA = "text-inversion-training-data"
 TRAINING_DIR = "text-inversion-output"
 CONF_FILE = "preferences.conf"
-
+config = None
 
 class textualInversionForm(npyscreen.FormMultiPageAction):
     resolutions = [512, 768, 1024]
@@ -122,7 +125,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             value=str(
                 saved_args.get(
                     "train_data_dir",
-                    Path(Globals.root) / TRAINING_DATA / default_placeholder_token,
+                    config.root_dir / TRAINING_DATA / default_placeholder_token,
                 )
             ),
             scroll_exit=True,
@@ -135,7 +138,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
             value=str(
                 saved_args.get(
                     "output_dir",
-                    Path(Globals.root) / TRAINING_DIR / default_placeholder_token,
+                    config.root_dir / TRAINING_DIR / default_placeholder_token,
                 )
             ),
             scroll_exit=True,
@@ -241,9 +244,9 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
         placeholder = self.placeholder_token.value
         self.prompt_token.value = f"(Trigger by using <{placeholder}> in your prompts)"
         self.train_data_dir.value = str(
-            Path(Globals.root) / TRAINING_DATA / placeholder
+            config.root_dir / TRAINING_DATA / placeholder
         )
-        self.output_dir.value = str(Path(Globals.root) / TRAINING_DIR / placeholder)
+        self.output_dir.value = str(config.root_dir / TRAINING_DIR / placeholder)
         self.resume_from_checkpoint.value = Path(self.output_dir.value).exists()
 
     def on_ok(self):
@@ -284,7 +287,7 @@ class textualInversionForm(npyscreen.FormMultiPageAction):
         return True
 
     def get_model_names(self) -> Tuple[List[str], int]:
-        conf = OmegaConf.load(os.path.join(Globals.root, "configs/models.yaml"))
+        conf = OmegaConf.load(config.root_dir / "configs/models.yaml")
         model_names = [
             idx
             for idx in sorted(list(conf.keys()))
@@ -367,7 +370,7 @@ def copy_to_embeddings_folder(args: dict):
     """
     source = Path(args["output_dir"], "learned_embeds.bin")
    dest_dir_name = args["placeholder_token"].strip("<>")
-    destination = Path(Globals.root, "embeddings", dest_dir_name)
+    destination = config.root_dir / "embeddings" / dest_dir_name
     os.makedirs(destination, exist_ok=True)
     logger.info(f"Training completed. Copying learned_embeds.bin into {str(destination)}")
     shutil.copy(source, destination)
@@ -383,7 +386,7 @@ def save_args(args: dict):
     """
     Save the current argument values to an omegaconf file
     """
-    dest_dir = Path(Globals.root) / TRAINING_DIR
+    dest_dir = config.root_dir / TRAINING_DIR
     os.makedirs(dest_dir, exist_ok=True)
     conf_file = dest_dir / CONF_FILE
     conf = OmegaConf.create(args)
@@ -394,7 +397,7 @@ def previous_args() -> dict:
     """
     Get the previous arguments used.
     """
-    conf_file = Path(Globals.root) / TRAINING_DIR / CONF_FILE
+    conf_file = config.root_dir / TRAINING_DIR / CONF_FILE
     try:
         conf = OmegaConf.load(conf_file)
         conf["placeholder_token"] = conf["placeholder_token"].strip("<>")
@@ -420,7 +423,7 @@ def do_front_end(args: Namespace):
     save_args(args)
 
     try:
-        do_textual_inversion_training(**args)
+        do_textual_inversion_training(get_invokeai_config(),**args)
         copy_to_embeddings_folder(args)
     except Exception as e:
         logger.error("An exception occurred during training. The exception was:")
@@ -430,13 +433,20 @@ def main():
+    global config
+
     args = parse_args()
-    global_set_root(args.root_dir or Globals.root)
+    config = get_invokeai_config(argv=[])
+
+    # change root if needed
+    if args.root_dir:
+        config.root = args.root_dir
+
     try:
         if args.front_end:
             do_front_end(args)
         else:
-            do_textual_inversion_training(**vars(args))
+            do_textual_inversion_training(config,**vars(args))
     except AssertionError as e:
         logger.error(e)
         sys.exit(-1)
diff --git a/tests/preflight_prompts.txt b/tests/preflight_prompts.txt
deleted file mode 100644
index cf6e5bc07c..0000000000
--- a/tests/preflight_prompts.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-banana sushi -Ak_lms -W640 -H480 -S42 -s20
-banana sushi -Ak_lms -S42 -G1 -U 2 0.5 -s20
-banana sushi -Ak_lms -S42 -v0.2 -n3 -s20
-banana sushi -Ak_lms -S42 -V1349749425:0.1,4145759947:0.1 -s20
diff --git a/tests/test_config.py b/tests/test_config.py
index 10cc86fd65..6d0586213e 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -58,6 +58,11 @@ def test_env_override():
     conf = InvokeAIAppConfig(conf=init1,argv=['--max_loaded=10'])
     assert conf.nsfw_checker==True
 
+    # environment variables should be case insensitive
+    os.environ['InvokeAI_Max_Loaded_Models'] = '15'
+    conf = InvokeAIAppConfig(conf=init1)
+    assert conf.max_loaded_models == 15
+
     conf = InvokeAIAppConfig(conf=init1,argv=['--no-nsfw_checker','--max_loaded=10'])
     assert conf.nsfw_checker==False
     assert conf.max_loaded_models==10
diff --git a/tests/validate_pr_prompt.txt b/tests/validate_pr_prompt.txt
index f5e54edb99..8607987864 100644
--- a/tests/validate_pr_prompt.txt
+++ b/tests/validate_pr_prompt.txt
@@ -1,4 +1,3 @@
 t2i --positive_prompt 'banana sushi' --seed 42
 compel --prompt 'strawberry sushi' | compel | noise | t2l --scheduler heun --steps 3 --scheduler ddim --link -3 conditioning positive_conditioning --link -2 conditioning negative_conditioning | l2i
-compel --prompt 'banana sushi' | compel | noise | t2i --scheduler heun --steps 3 --scheduler euler_a --link -3 conditioning positive_conditioning --link -2 conditioning negative_conditioning
-
+compel --prompt 'banana sushi' | compel | noise | t2l --scheduler heun --steps 3 --scheduler euler_a --link -3 conditioning positive_conditioning --link -2 conditioning negative_conditioning