From 2273b3a8c8707b50490fc6d96f6926c800591ca4 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 25 May 2023 20:41:26 -0400 Subject: [PATCH 1/2] fix potential race condition in config system --- invokeai/app/api_app.py | 3 +- invokeai/app/cli_app.py | 5 +- invokeai/app/services/config.py | 63 ++++++++++--------- invokeai/app/services/sqlite.py | 1 - invokeai/backend/config/invokeai_configure.py | 11 ++-- .../backend/config/model_install_backend.py | 4 +- invokeai/backend/image_util/patchmatch.py | 4 +- invokeai/backend/image_util/txt2mask.py | 4 +- .../convert_ckpt_to_diffusers.py | 10 +-- .../backend/model_management/model_manager.py | 10 +-- invokeai/backend/prompting/conditioning.py | 6 +- invokeai/backend/restoration/codeformer.py | 4 +- invokeai/backend/restoration/gfpgan.py | 4 +- invokeai/backend/restoration/realesrgan.py | 4 +- invokeai/backend/safety_checker.py | 5 +- .../backend/stable_diffusion/concepts_lib.py | 7 ++- .../stable_diffusion/diffusers_pipeline.py | 4 +- .../diffusion/shared_invokeai_diffusion.py | 40 ++++++------ invokeai/backend/util/devices.py | 5 +- invokeai/frontend/install/model_install.py | 4 +- invokeai/frontend/merge/merge_diffusers.py | 4 +- .../frontend/training/textual_inversion.py | 6 +- tests/test_config.py | 32 ++++++---- 23 files changed, 128 insertions(+), 112 deletions(-) diff --git a/invokeai/app/api_app.py b/invokeai/app/api_app.py index 69d322578d..96a22466b5 100644 --- a/invokeai/app/api_app.py +++ b/invokeai/app/api_app.py @@ -39,7 +39,8 @@ socket_io = SocketIO(app) # initialize config # this is a module global -app_config = InvokeAIAppConfig() +app_config = InvokeAIAppConfig.get_config() +app_config.parse_args() # Add startup event to load dependencies @app.on_event("startup") diff --git a/invokeai/app/cli_app.py b/invokeai/app/cli_app.py index de543d2d85..5f0d22a7d1 100644 --- a/invokeai/app/cli_app.py +++ b/invokeai/app/cli_app.py @@ -37,7 +37,7 @@ from .services.invocation_services import InvocationServices from .services.invoker import Invoker from .services.processor import DefaultInvocationProcessor from .services.sqlite import SqliteItemStorage -from .services.config import get_invokeai_config +from .services.config import InvokeAIAppConfig class CliCommand(BaseModel): command: Union[BaseCommand.get_commands() + BaseInvocation.get_invocations()] = Field(discriminator="type") # type: ignore @@ -196,7 +196,8 @@ logger = logger.InvokeAILogger.getLogger() def invoke_cli(): # this gets the basic configuration - config = get_invokeai_config() + config = InvokeAIAppConfig.get_config() + config.parse_args() # get the optional list of invocations to execute on the command line parser = config.get_parser() diff --git a/invokeai/app/services/config.py b/invokeai/app/services/config.py index 49e0b6bed4..25b2241d58 100644 --- a/invokeai/app/services/config.py +++ b/invokeai/app/services/config.py @@ -82,13 +82,10 @@ Typical usage: from invokeai.invocations.generate import TextToImageInvocation # get global configuration and print its nsfw_checker value - conf = InvokeAIAppConfig() + conf = InvokeAIAppConfig.get_config() + conf.parse_args() print(conf.nsfw_checker) - # get the text2image invocation and print its step value - text2image = TextToImageInvocation() - print(text2image.steps) - Computed properties: The InvokeAIAppConfig object has a series of properties that @@ -103,10 +100,11 @@ a Path object: lora_path - path to the LoRA directory In most cases, you will want to create a single InvokeAIAppConfig -object for the entire application. 
The get_invokeai_config() function
+object for the entire application. The InvokeAIAppConfig.get_config() function
 does this:
 
-  config = get_invokeai_config()
+  config = InvokeAIAppConfig.get_config()
+  config.parse_args()   # read values from the command line/config file
   print(config.root)
 
 # Subclassing
 
@@ -141,6 +139,7 @@ two configs are kept in separate sections of the config file:
    outdir: outputs
    ...
 '''
+from __future__ import annotations
 import argparse
 import pydoc
 import typing
@@ -155,9 +154,6 @@ from typing import Any, ClassVar, Dict, List, Literal, Type, Union, get_origin,
 INIT_FILE = Path('invokeai.yaml')
 LEGACY_INIT_FILE = Path('invokeai.init')
 
-# This global stores a singleton InvokeAIAppConfig configuration object
-global_config = None
-
 class InvokeAISettings(BaseSettings):
     '''
     Runtime configuration settings in which default values are
@@ -330,6 +326,9 @@ the command-line client (recommended for experts only), or can be changed
 by editing the file "INVOKEAI_ROOT/invokeai.yaml" or by setting
 environment variables INVOKEAI_<setting>.
     '''
+    singleton_config: ClassVar[InvokeAIAppConfig] = None
+    singleton_init: ClassVar[Dict] = None
+
     #fmt: off
     type: Literal["InvokeAI"] = "InvokeAI"
     host : str = Field(default="127.0.0.1", description="IP address to bind to", category='Web Server')
@@ -369,33 +368,44 @@ setting environment variables INVOKEAI_<setting>.
     embeddings : bool = Field(default=True, description='Load contents of embeddings directory', category='Models')
     #fmt: on
 
-    def __init__(self, conf: DictConfig = None, argv: List[str]=None, **kwargs):
+    def parse_args(self, argv: List[str]=None, conf: DictConfig = None, clobber=False):
         '''
-        Initialize InvokeAIAppconfig.
+        Update settings with the contents of the init file, the environment,
+        and the command line.
         :param conf: alternate Omegaconf dictionary object
         :param argv: alternate sys.argv list
-        :param **kwargs: attributes to initialize with
+        :param clobber: overwrite any settings passed at object creation time
         '''
-        super().__init__(**kwargs)
-
         # Set the runtime root directory. We parse command-line switches here
         # in order to pick up the --root_dir option.
-        self.parse_args(argv)
+        super().parse_args(argv)
         if conf is None:
             try:
                 conf = OmegaConf.load(self.root_dir / INIT_FILE)
             except:
                 pass
         InvokeAISettings.initconf = conf
-
+
         # parse args again in order to pick up settings in configuration file
-        self.parse_args(argv)
+        super().parse_args(argv)
 
-        # restore initialization values
-        hints = get_type_hints(self)
-        for k in kwargs:
-            setattr(self,k,parse_obj_as(hints[k],kwargs[k]))
+        if self.singleton_init and not clobber:
+            hints = get_type_hints(self.__class__)
+            for k in self.singleton_init:
+                setattr(self,k,parse_obj_as(hints[k],self.singleton_init[k]))
 
+    @classmethod
+    def get_config(cls,**kwargs)->InvokeAIAppConfig:
+        '''
+        This returns a singleton InvokeAIAppConfig configuration object.
+        '''
+        if cls.singleton_config is None \
+           or type(cls.singleton_config)!=cls \
+           or (kwargs and cls.singleton_init != kwargs):
+            cls.singleton_config = cls(**kwargs)
+            cls.singleton_init = kwargs
+        return cls.singleton_config
+
     @property
     def root_path(self)->Path:
         '''
@@ -513,11 +523,8 @@ class PagingArgumentParser(argparse.ArgumentParser):
         text = self.format_help()
         pydoc.pager(text)
 
-def get_invokeai_config(cls:Type[InvokeAISettings]=InvokeAIAppConfig,**kwargs)->InvokeAIAppConfig:
+def get_invokeai_config(**kwargs)->InvokeAIAppConfig:
     '''
-    This returns a singleton InvokeAIAppConfig configuration object.
+ Legacy function which returns InvokeAIAppConfig.get_config() ''' - global global_config - if global_config is None or type(global_config)!=cls: - global_config = cls(**kwargs) - return global_config + return InvokeAIAppConfig.get_config(**kwargs) diff --git a/invokeai/app/services/sqlite.py b/invokeai/app/services/sqlite.py index fd089014bb..a62fff88a1 100644 --- a/invokeai/app/services/sqlite.py +++ b/invokeai/app/services/sqlite.py @@ -26,7 +26,6 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]): self._table_name = table_name self._id_field = id_field # TODO: validate that T has this field self._lock = Lock() - self._conn = sqlite3.connect( self._filename, check_same_thread=False ) # TODO: figure out a better threading solution diff --git a/invokeai/backend/config/invokeai_configure.py b/invokeai/backend/config/invokeai_configure.py index 59f11d35bc..cf38dd93a6 100755 --- a/invokeai/backend/config/invokeai_configure.py +++ b/invokeai/backend/config/invokeai_configure.py @@ -51,10 +51,7 @@ from invokeai.backend.config.model_install_backend import ( hf_download_with_resume, recommended_datasets, ) -from invokeai.app.services.config import ( - get_invokeai_config, - InvokeAIAppConfig, -) +from invokeai.app.services.config import InvokeAIAppConfig warnings.filterwarnings("ignore") @@ -62,7 +59,7 @@ transformers.logging.set_verbosity_error() # --------------------------globals----------------------- -config = get_invokeai_config() +config = InvokeAIAppConfig.get_config() Model_dir = "models" Weights_dir = "ldm/stable-diffusion-v1/" @@ -820,8 +817,8 @@ def main(): if old_init_file.exists() and not new_init_file.exists(): print('** Migrating invokeai.init to invokeai.yaml') migrate_init_file(old_init_file) - config = get_invokeai_config() # reread defaults - + # Load new init file into config + config.parse_args(argv=[],conf=OmegaConf.load(new_init_file)) if not config.model_conf_path.exists(): initialize_rootdir(config.root, opt.yes_to_all) diff --git a/invokeai/backend/config/model_install_backend.py b/invokeai/backend/config/model_install_backend.py index cb76f955bc..96468dee4b 100644 --- a/invokeai/backend/config/model_install_backend.py +++ b/invokeai/backend/config/model_install_backend.py @@ -19,7 +19,7 @@ from tqdm import tqdm import invokeai.configs as configs -from invokeai.app.services.config import get_invokeai_config +from invokeai.app.services.config import InvokeAIAppConfig from ..model_management import ModelManager from ..stable_diffusion import StableDiffusionGeneratorPipeline @@ -27,7 +27,7 @@ from ..stable_diffusion import StableDiffusionGeneratorPipeline warnings.filterwarnings("ignore") # --------------------------globals----------------------- -config = get_invokeai_config() +config = InvokeAIAppConfig.get_config() Model_dir = "models" Weights_dir = "ldm/stable-diffusion-v1/" diff --git a/invokeai/backend/image_util/patchmatch.py b/invokeai/backend/image_util/patchmatch.py index 0d2221be41..2e65f08d9f 100644 --- a/invokeai/backend/image_util/patchmatch.py +++ b/invokeai/backend/image_util/patchmatch.py @@ -6,7 +6,8 @@ be suppressed or deferred """ import numpy as np import invokeai.backend.util.logging as logger -from invokeai.app.services.config import get_invokeai_config +from invokeai.app.services.config import InvokeAIAppConfig +config = InvokeAIAppConfig.get_config() class PatchMatch: """ @@ -21,7 +22,6 @@ class PatchMatch: @classmethod def _load_patch_match(self): - config = get_invokeai_config() if self.tried_load: return if config.try_patchmatch: diff --git 
a/invokeai/backend/image_util/txt2mask.py b/invokeai/backend/image_util/txt2mask.py index 1a8fcfeb90..429c9b63fb 100644 --- a/invokeai/backend/image_util/txt2mask.py +++ b/invokeai/backend/image_util/txt2mask.py @@ -33,10 +33,11 @@ from PIL import Image, ImageOps from transformers import AutoProcessor, CLIPSegForImageSegmentation import invokeai.backend.util.logging as logger -from invokeai.app.services.config import get_invokeai_config +from invokeai.app.services.config import InvokeAIAppConfig CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined" CLIPSEG_SIZE = 352 +config = InvokeAIAppConfig.get_config() class SegmentedGrayscale(object): def __init__(self, image: Image, heatmap: torch.Tensor): @@ -83,7 +84,6 @@ class Txt2Mask(object): def __init__(self, device="cpu", refined=False): logger.info("Initializing clipseg model for text to mask inference") - config = get_invokeai_config() # BUG: we are not doing anything with the device option at this time self.device = device diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py index 467fe39155..acf93d9ab6 100644 --- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py +++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py @@ -26,7 +26,7 @@ import torch from safetensors.torch import load_file import invokeai.backend.util.logging as logger -from invokeai.app.services.config import get_invokeai_config +from invokeai.app.services.config import InvokeAIAppConfig from .model_manager import ModelManager, SDLegacyType @@ -842,7 +842,7 @@ def convert_ldm_bert_checkpoint(checkpoint, config): def convert_ldm_clip_checkpoint(checkpoint): text_model = CLIPTextModel.from_pretrained( - "openai/clip-vit-large-patch14", cache_dir=get_invokeai_config().cache_dir + "openai/clip-vit-large-patch14", cache_dir=InvokeAIAppConfig.get_config().cache_dir ) keys = list(checkpoint.keys()) @@ -897,7 +897,7 @@ textenc_pattern = re.compile("|".join(protected.keys())) def convert_paint_by_example_checkpoint(checkpoint): - cache_dir = get_invokeai_config().cache_dir + cache_dir = InvokeAIAppConfig.get_config().cache_dir config = CLIPVisionConfig.from_pretrained( "openai/clip-vit-large-patch14", cache_dir=cache_dir ) @@ -969,7 +969,7 @@ def convert_paint_by_example_checkpoint(checkpoint): def convert_open_clip_checkpoint(checkpoint): - cache_dir = get_invokeai_config().cache_dir + cache_dir = InvokeAIAppConfig.get_config().cache_dir text_model = CLIPTextModel.from_pretrained( "stabilityai/stable-diffusion-2", subfolder="text_encoder", cache_dir=cache_dir ) @@ -1092,7 +1092,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt( :param vae: A diffusers VAE to load into the pipeline. :param vae_path: Path to a checkpoint VAE that will be converted into diffusers and loaded into the pipeline. 
""" - config = get_invokeai_config() + config = InvokeAIAppConfig.get_config() with warnings.catch_warnings(): warnings.simplefilter("ignore") verbosity = dlogging.get_verbosity() diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index bdbca195bd..8e80c0b5c4 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -47,7 +47,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import ( from ..stable_diffusion import ( StableDiffusionGeneratorPipeline, ) -from invokeai.app.services.config import get_invokeai_config +from invokeai.app.services.config import InvokeAIAppConfig from ..util import CUDA_DEVICE, ask_user, download_with_resume class SDLegacyType(Enum): @@ -98,7 +98,7 @@ class ModelManager(object): if not isinstance(config, DictConfig): config = OmegaConf.load(config) self.config = config - self.globals = get_invokeai_config() + self.globals = InvokeAIAppConfig.get_config() self.precision = precision self.device = torch.device(device_type) self.max_loaded_models = max_loaded_models @@ -1057,7 +1057,7 @@ class ModelManager(object): """ # Three transformer models to check: bert, clip and safety checker, and # the diffusers as well - config = get_invokeai_config() + config = InvokeAIAppConfig.get_config() models_dir = config.root_dir / "models" legacy_locations = [ Path( @@ -1287,7 +1287,7 @@ class ModelManager(object): @classmethod def _delete_model_from_cache(cls,repo_id): - cache_info = scan_cache_dir(get_invokeai_config().cache_dir) + cache_info = scan_cache_dir(InvokeAIAppConfig.get_config().cache_dir) # I'm sure there is a way to do this with comprehensions # but the code quickly became incomprehensible! 
@@ -1304,7 +1304,7 @@ class ModelManager(object): @staticmethod def _abs_path(path: str | Path) -> Path: - globals = get_invokeai_config() + globals = InvokeAIAppConfig.get_config() if path is None or Path(path).is_absolute(): return path return Path(globals.root_dir, path).resolve() diff --git a/invokeai/backend/prompting/conditioning.py b/invokeai/backend/prompting/conditioning.py index 2e62853872..7a26be9800 100644 --- a/invokeai/backend/prompting/conditioning.py +++ b/invokeai/backend/prompting/conditioning.py @@ -21,10 +21,12 @@ from compel.prompt_parser import ( import invokeai.backend.util.logging as logger -from invokeai.app.services.config import get_invokeai_config +from invokeai.app.services.config import InvokeAIAppConfig from ..stable_diffusion import InvokeAIDiffuserComponent from ..util import torch_dtype +config = InvokeAIAppConfig.get_config() + def get_uc_and_c_and_ec(prompt_string, model: InvokeAIDiffuserComponent, log_tokens=False, skip_normalize_legacy_blend=False): @@ -39,8 +41,6 @@ def get_uc_and_c_and_ec(prompt_string, truncate_long_prompts=False, ) - config = get_invokeai_config() - # get rid of any newline characters prompt_string = prompt_string.replace("\n", " ") positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string) diff --git a/invokeai/backend/restoration/codeformer.py b/invokeai/backend/restoration/codeformer.py index b7073f8f8b..2a39a5c365 100644 --- a/invokeai/backend/restoration/codeformer.py +++ b/invokeai/backend/restoration/codeformer.py @@ -6,7 +6,7 @@ import numpy as np import torch import invokeai.backend.util.logging as logger -from invokeai.app.services.config import get_invokeai_config +from invokeai.app.services.config import InvokeAIAppConfig pretrained_model_url = ( "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth" @@ -18,7 +18,7 @@ class CodeFormerRestoration: self, codeformer_dir="models/codeformer", codeformer_model_path="codeformer.pth" ) -> None: - self.globals = get_invokeai_config() + self.globals = InvokeAIAppConfig.get_config() codeformer_dir = self.globals.root_dir / codeformer_dir self.model_path = codeformer_dir / codeformer_model_path self.codeformer_model_exists = self.model_path.exists() diff --git a/invokeai/backend/restoration/gfpgan.py b/invokeai/backend/restoration/gfpgan.py index 063feaa89a..5021e8d7d2 100644 --- a/invokeai/backend/restoration/gfpgan.py +++ b/invokeai/backend/restoration/gfpgan.py @@ -7,11 +7,11 @@ import torch from PIL import Image import invokeai.backend.util.logging as logger -from invokeai.app.services.config import get_invokeai_config +from invokeai.app.services.config import InvokeAIAppConfig class GFPGAN: def __init__(self, gfpgan_model_path="models/gfpgan/GFPGANv1.4.pth") -> None: - self.globals = get_invokeai_config() + self.globals = InvokeAIAppConfig.get_config() if not os.path.isabs(gfpgan_model_path): gfpgan_model_path = self.globals.root_dir / gfpgan_model_path self.model_path = gfpgan_model_path diff --git a/invokeai/backend/restoration/realesrgan.py b/invokeai/backend/restoration/realesrgan.py index c6c6d2d3b4..e08978adc2 100644 --- a/invokeai/backend/restoration/realesrgan.py +++ b/invokeai/backend/restoration/realesrgan.py @@ -6,8 +6,8 @@ from PIL import Image from PIL.Image import Image as ImageType import invokeai.backend.util.logging as logger -from invokeai.app.services.config import get_invokeai_config -config = get_invokeai_config() +from invokeai.app.services.config import InvokeAIAppConfig +config = 
InvokeAIAppConfig.get_config() class ESRGAN: def __init__(self, bg_tile_size=400) -> None: diff --git a/invokeai/backend/safety_checker.py b/invokeai/backend/safety_checker.py index 55e8eb1987..5ff69fe86c 100644 --- a/invokeai/backend/safety_checker.py +++ b/invokeai/backend/safety_checker.py @@ -15,9 +15,11 @@ from transformers import AutoFeatureExtractor import invokeai.assets.web as web_assets import invokeai.backend.util.logging as logger -from invokeai.app.services.config import get_invokeai_config +from invokeai.app.services.config import InvokeAIAppConfig from .util import CPU_DEVICE +config = InvokeAIAppConfig.get_config() + class SafetyChecker(object): CAUTION_IMG = "caution.png" @@ -26,7 +28,6 @@ class SafetyChecker(object): caution = Image.open(path) self.caution_img = caution.resize((caution.width // 2, caution.height // 2)) self.device = device - config = get_invokeai_config() try: safety_model_id = "CompVis/stable-diffusion-safety-checker" diff --git a/invokeai/backend/stable_diffusion/concepts_lib.py b/invokeai/backend/stable_diffusion/concepts_lib.py index beb884b012..5294150783 100644 --- a/invokeai/backend/stable_diffusion/concepts_lib.py +++ b/invokeai/backend/stable_diffusion/concepts_lib.py @@ -17,15 +17,16 @@ from huggingface_hub import ( hf_hub_url, ) -import invokeai.backend.util.logging as logger -from invokeai.app.services.config import get_invokeai_config +from invokeai.backend.util.logging import InvokeAILogger +from invokeai.app.services.config import InvokeAIAppConfig +logger = InvokeAILogger.getLogger() class HuggingFaceConceptsLibrary(object): def __init__(self, root=None): """ Initialize the Concepts object. May optionally pass a root directory. """ - self.config = get_invokeai_config() + self.config = InvokeAIAppConfig.get_config() self.root = root or self.config.root self.hf_api = HfApi() self.local_concepts = dict() diff --git a/invokeai/backend/stable_diffusion/diffusers_pipeline.py b/invokeai/backend/stable_diffusion/diffusers_pipeline.py index 4ca2a5cb30..1f188d8dd9 100644 --- a/invokeai/backend/stable_diffusion/diffusers_pipeline.py +++ b/invokeai/backend/stable_diffusion/diffusers_pipeline.py @@ -33,7 +33,7 @@ from torchvision.transforms.functional import resize as tv_resize from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer from typing_extensions import ParamSpec -from invokeai.app.services.config import get_invokeai_config +from invokeai.app.services.config import InvokeAIAppConfig from ..util import CPU_DEVICE, normalize_device from .diffusion import ( AttentionMapSaver, @@ -346,7 +346,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline): """ if xformers is available, use it, otherwise use sliced attention. 
""" - config = get_invokeai_config() + config = InvokeAIAppConfig.get_config() if ( torch.cuda.is_available() and is_xformers_available() diff --git a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py index 4131837b41..ea01301fa0 100644 --- a/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py +++ b/invokeai/backend/stable_diffusion/diffusion/shared_invokeai_diffusion.py @@ -10,7 +10,7 @@ from diffusers.models.attention_processor import AttentionProcessor from typing_extensions import TypeAlias import invokeai.backend.util.logging as logger -from invokeai.app.services.config import get_invokeai_config +from invokeai.app.services.config import InvokeAIAppConfig from .cross_attention_control import ( Arguments, @@ -72,7 +72,7 @@ class InvokeAIDiffuserComponent: :param model: the unet model to pass through to cross attention control :param model_forward_callback: a lambda with arguments (x, sigma, conditioning_to_apply). will be called repeatedly. most likely, this should simply call model.forward(x, sigma, conditioning) """ - config = get_invokeai_config() + config = InvokeAIAppConfig.get_config() self.conditioning = None self.model = model self.is_running_diffusers = is_running_diffusers @@ -112,23 +112,25 @@ class InvokeAIDiffuserComponent: # TODO resuscitate attention map saving # self.remove_attention_map_saving() - def override_cross_attention( - self, conditioning: ExtraConditioningInfo, step_count: int - ) -> Dict[str, AttentionProcessor]: - """ - setup cross attention .swap control. for diffusers this replaces the attention processor, so - the previous attention processor is returned so that the caller can restore it later. - """ - self.conditioning = conditioning - self.cross_attention_control_context = Context( - arguments=self.conditioning.cross_attention_control_args, - step_count=step_count, - ) - return override_cross_attention( - self.model, - self.cross_attention_control_context, - is_running_diffusers=self.is_running_diffusers, - ) + # apparently unused code + # TODO: delete + # def override_cross_attention( + # self, conditioning: ExtraConditioningInfo, step_count: int + # ) -> Dict[str, AttentionProcessor]: + # """ + # setup cross attention .swap control. for diffusers this replaces the attention processor, so + # the previous attention processor is returned so that the caller can restore it later. 
+    #     """
+    #     self.conditioning = conditioning
+    #     self.cross_attention_control_context = Context(
+    #         arguments=self.conditioning.cross_attention_control_args,
+    #         step_count=step_count,
+    #     )
+    #     return override_cross_attention(
+    #         self.model,
+    #         self.cross_attention_control_context,
+    #         is_running_diffusers=self.is_running_diffusers,
+    #     )
 
     def restore_default_cross_attention(
         self, restore_attention_processor: Optional["AttentionProcessor"] = None
diff --git a/invokeai/backend/util/devices.py b/invokeai/backend/util/devices.py
index c6c0819df8..615209d98d 100644
--- a/invokeai/backend/util/devices.py
+++ b/invokeai/backend/util/devices.py
@@ -4,15 +4,15 @@ from contextlib import nullcontext
 import torch
 from torch import autocast
 
-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig
 
 CPU_DEVICE = torch.device("cpu")
 CUDA_DEVICE = torch.device("cuda")
 MPS_DEVICE = torch.device("mps")
+config = InvokeAIAppConfig.get_config()
 
 def choose_torch_device() -> torch.device:
     """Convenience routine for guessing which GPU device to run model on"""
-    config = get_invokeai_config()
     if config.always_use_cpu:
         return CPU_DEVICE
     if torch.cuda.is_available():
@@ -32,7 +32,6 @@ def choose_precision(device: torch.device) -> str:
 
 def torch_dtype(device: torch.device) -> torch.dtype:
-    config = get_invokeai_config()
     if config.full_precision:
         return torch.float32
     if choose_precision(device) == "float16":
diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py
index a283b4952d..375fdf7ba1 100644
--- a/invokeai/frontend/install/model_install.py
+++ b/invokeai/frontend/install/model_install.py
@@ -40,13 +40,13 @@ from .widgets import (
     TextBox,
     set_min_terminal_size,
 )
-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig
 
 # minimum size for the UI
 MIN_COLS = 120
 MIN_LINES = 45
 
-config = get_invokeai_config()
+config = InvokeAIAppConfig.get_config()
 
 class addModelsForm(npyscreen.FormMultiPage):
     # for responsive resizing - disabled
diff --git a/invokeai/frontend/merge/merge_diffusers.py b/invokeai/frontend/merge/merge_diffusers.py
index 882a4587b6..9da04b97f8 100644
--- a/invokeai/frontend/merge/merge_diffusers.py
+++ b/invokeai/frontend/merge/merge_diffusers.py
@@ -20,12 +20,12 @@ from npyscreen import widget
 from omegaconf import OmegaConf
 
 import invokeai.backend.util.logging as logger
-from invokeai.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig
 from ...backend.model_management import ModelManager
 from ...frontend.install.widgets import FloatTitleSlider
 
 DEST_MERGED_MODEL_DIR = "merged_models"
-config = get_invokeai_config()
+config = InvokeAIAppConfig.get_config()
 
 def merge_diffusion_models(
     model_ids_or_paths: List[Union[str, Path]],
diff --git a/invokeai/frontend/training/textual_inversion.py b/invokeai/frontend/training/textual_inversion.py
index 90e402f48b..e1c7b3749f 100755
--- a/invokeai/frontend/training/textual_inversion.py
+++ b/invokeai/frontend/training/textual_inversion.py
@@ -22,7 +22,7 @@ from omegaconf import OmegaConf
 
 import invokeai.backend.util.logging as logger
 
-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig
 from ...backend.training import (
     do_textual_inversion_training,
     parse_args
@@ -423,7 +423,7 @@ def do_front_end(args: Namespace):
     save_args(args)
 
     try:
-        
do_textual_inversion_training(get_invokeai_config(),**args)
+        do_textual_inversion_training(InvokeAIAppConfig.get_config(),**args)
         copy_to_embeddings_folder(args)
     except Exception as e:
         logger.error("An exception occurred during training. The exception was:")
@@ -436,7 +436,7 @@ def main():
     global config
 
     args = parse_args()
-    config = get_invokeai_config(argv=[])
+    config = InvokeAIAppConfig.get_config()
 
     # change root if needed
     if args.root_dir:
diff --git a/tests/test_config.py b/tests/test_config.py
index 6d0586213e..2c883d63f5 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -5,7 +5,7 @@ from omegaconf import OmegaConf
 from pathlib import Path
 
 os.environ['INVOKEAI_ROOT']='/tmp'
-from invokeai.app.services.config import InvokeAIAppConfig, InvokeAISettings
+from invokeai.app.services.config import InvokeAIAppConfig
 from invokeai.app.invocations.generate import TextToImageInvocation
 
 init1 = OmegaConf.create(
@@ -32,48 +32,56 @@ def test_use_init():
     # note that we explicitly set omegaconf dict and argv here
     # so that the values aren't read from ~invokeai/invokeai.yaml and
     # sys.argv respectively.
-    conf1 = InvokeAIAppConfig(init1,[])
+    conf1 = InvokeAIAppConfig.get_config()
     assert conf1
+    conf1.parse_args(conf=init1,argv=[])
     assert conf1.max_loaded_models==5
     assert not conf1.nsfw_checker
 
-    conf2 = InvokeAIAppConfig(init2,[])
+    conf2 = InvokeAIAppConfig.get_config()
     assert conf2
+    conf2.parse_args(conf=init2,argv=[])
     assert conf2.nsfw_checker
     assert conf2.max_loaded_models==2
     assert not hasattr(conf2,'invalid_attribute')
 
 def test_argv_override():
-    conf = InvokeAIAppConfig(init1,['--nsfw_checker','--max_loaded=10'])
+    conf = InvokeAIAppConfig.get_config()
+    conf.parse_args(conf=init1,argv=['--nsfw_checker','--max_loaded=10'])
     assert conf.nsfw_checker
     assert conf.max_loaded_models==10
     assert conf.outdir==Path('outputs')  # this is the default
 
 def test_env_override():
     # argv overrides
-    conf = InvokeAIAppConfig(conf=init1,argv=['--max_loaded=10'])
+    conf = InvokeAIAppConfig()
+    conf.parse_args(conf=init1,argv=['--max_loaded=10'])
     assert conf.nsfw_checker==False
-
     os.environ['INVOKEAI_nsfw_checker'] = 'True'
-    conf = InvokeAIAppConfig(conf=init1,argv=['--max_loaded=10'])
+    conf.parse_args(conf=init1,argv=['--max_loaded=10'])
     assert conf.nsfw_checker==True
 
     # environment variables should be case insensitive
     os.environ['InvokeAI_Max_Loaded_Models'] = '15'
-    conf = InvokeAIAppConfig(conf=init1)
+    conf = InvokeAIAppConfig()
+    conf.parse_args(conf=init1)
     assert conf.max_loaded_models == 15
 
-    conf = InvokeAIAppConfig(conf=init1,argv=['--no-nsfw_checker','--max_loaded=10'])
+    conf = InvokeAIAppConfig()
+    conf.parse_args(conf=init1,argv=['--no-nsfw_checker','--max_loaded=10'])
     assert conf.nsfw_checker==False
     assert conf.max_loaded_models==10
 
-    conf = InvokeAIAppConfig(conf=init1,argv=[],max_loaded_models=20)
+    conf = InvokeAIAppConfig.get_config(max_loaded_models=20)
+    conf.parse_args(conf=init1,argv=[])
     assert conf.max_loaded_models==20
 
 def test_type_coercion():
-    conf = InvokeAIAppConfig(argv=['--root=/tmp/foobar'])
+    conf = InvokeAIAppConfig.get_config()
+    conf.parse_args(argv=['--root=/tmp/foobar'])
     assert conf.root==Path('/tmp/foobar')
     assert isinstance(conf.root,Path)
 
-    conf = InvokeAIAppConfig(argv=['--root=/tmp/foobar'],root='/tmp/different')
+    conf = InvokeAIAppConfig.get_config(root='/tmp/different')
+    conf.parse_args(argv=['--root=/tmp/foobar'])
    assert conf.root==Path('/tmp/different')
    assert isinstance(conf.root,Path)

From e56965ad76f96624bb3dac30997619858f67663d Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Thu, 25 May 2023 21:10:00 -0400
Subject: [PATCH 2/2] documentation tweaks; fixed initialization in a couple more places

---
 invokeai/app/services/config.py               | 39 +++++++++++++++----
 invokeai/backend/config/invokeai_configure.py |  6 +--
 .../training/textual_inversion_training.py    |  2 +-
 3 files changed, 35 insertions(+), 12 deletions(-)

diff --git a/invokeai/app/services/config.py b/invokeai/app/services/config.py
index 25b2241d58..208f6d9949 100644
--- a/invokeai/app/services/config.py
+++ b/invokeai/app/services/config.py
@@ -51,18 +51,32 @@ in INVOKEAI_ROOT. You can supersede this by providing any
 OmegaConf dictionary object at initialization time:
 
     omegaconf = OmegaConf.load('/tmp/init.yaml')
-    conf = InvokeAIAppConfig(conf=omegaconf)
+    conf = InvokeAIAppConfig()
+    conf.parse_args(conf=omegaconf)
 
-By default, InvokeAIAppConfig will parse the contents of `sys.argv` at
-initialization time. You may pass a list of strings in the optional
+InvokeAIAppConfig.parse_args() will parse the contents of `sys.argv`
+at initialization time. You may pass a list of strings in the optional
 `argv` argument to use instead of the system argv:
 
-  conf = InvokeAIAppConfig(arg=['--xformers_enabled'])
+  conf.parse_args(argv=['--xformers_enabled'])
 
-It is also possible to set a value at initialization time. This value
-has highest priority.
+It is also possible to set a value at initialization time. However, a
+subsequent call to parse_args() may overwrite it:
 
     conf = InvokeAIAppConfig(xformers_enabled=True)
+    conf.parse_args(argv=['--no-xformers'])
+    conf.xformers_enabled
+    # False
+
+
+To avoid this, use `get_config()` to retrieve the application-wide
+configuration object. This will retain any properties set at object
+creation time:
+
+    conf = InvokeAIAppConfig.get_config(xformers_enabled=True)
+    conf.parse_args(argv=['--no-xformers'])
+    conf.xformers_enabled
+    # True
 
 Any setting can be overwritten by setting an environment variable of
 form: "INVOKEAI_<setting>", as in:
@@ -76,16 +90,24 @@ Order of precedence (from highest):
    4) config file options
    5) pydantic defaults
 
-Typical usage:
+Typical usage in the top-level file:
 
     from invokeai.app.services.config import InvokeAIAppConfig
-    from invokeai.invocations.generate import TextToImageInvocation
 
     # get global configuration and print its nsfw_checker value
     conf = InvokeAIAppConfig.get_config()
     conf.parse_args()
     print(conf.nsfw_checker)
 
+Typical usage in a backend module:
+
+    from invokeai.app.services.config import InvokeAIAppConfig
+
+    # get global configuration and print its nsfw_checker value
+    conf = InvokeAIAppConfig.get_config()
+    print(conf.nsfw_checker)
+
 
 Computed properties:
 
 The InvokeAIAppConfig object has a series of properties that
@@ -138,6 +160,7 @@ two configs are kept in separate sections of the config file:
     legacy_conf_dir: configs/stable-diffusion
     outdir: outputs
     ...
+ ''' from __future__ import annotations import argparse diff --git a/invokeai/backend/config/invokeai_configure.py b/invokeai/backend/config/invokeai_configure.py index cf38dd93a6..4c0b0e3641 100755 --- a/invokeai/backend/config/invokeai_configure.py +++ b/invokeai/backend/config/invokeai_configure.py @@ -631,7 +631,7 @@ def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Nam def default_startup_options(init_file: Path) -> Namespace: - opts = InvokeAIAppConfig(argv=[]) + opts = InvokeAIAppConfig.get_config() outdir = Path(opts.outdir) if not outdir.is_absolute(): opts.outdir = str(config.root / opts.outdir) @@ -696,7 +696,7 @@ def write_opts(opts: Namespace, init_file: Path): """ # this will load current settings - config = InvokeAIAppConfig() + config = InvokeAIAppConfig.get_config() for key,value in opts.__dict__.items(): if hasattr(config,key): setattr(config,key,value) @@ -728,7 +728,7 @@ def write_default_options(program_opts: Namespace, initfile: Path): # yaml format. def migrate_init_file(legacy_format:Path): old = legacy_parser.parse_args([f'@{str(legacy_format)}']) - new = InvokeAIAppConfig(conf={}) + new = InvokeAIAppConfig.get_config() fields = list(get_type_hints(InvokeAIAppConfig).keys()) for attr in fields: diff --git a/invokeai/backend/training/textual_inversion_training.py b/invokeai/backend/training/textual_inversion_training.py index 8c27a6e718..c4290cacb3 100644 --- a/invokeai/backend/training/textual_inversion_training.py +++ b/invokeai/backend/training/textual_inversion_training.py @@ -88,7 +88,7 @@ def save_progress( def parse_args(): - config = InvokeAIAppConfig(argv=[]) + config = InvokeAIAppConfig.get_config() parser = PagingArgumentParser( description="Textual inversion training" )
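
A minimal sketch of the resulting usage pattern (illustrative only; the
`--no-nsfw_checker` switch is the auto-generated negative flag exercised in
tests/test_config.py, and any other setting works the same way):

    from invokeai.app.services.config import InvokeAIAppConfig

    # top-level entry point: create the process-wide singleton, then fill it
    # from the command line, the init file, and environment variables
    config = InvokeAIAppConfig.get_config()
    config.parse_args(argv=['--no-nsfw_checker'])

    # any other module: get_config() returns the same, already-parsed object
    # rather than constructing a fresh one, which is what closes the race
    config2 = InvokeAIAppConfig.get_config()
    assert config2 is config
    assert not config2.nsfw_checker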