fixes to env parsing, textual inversion & help text

- Make environment variable settings case InSenSiTive:
  INVOKEAI_MAX_LOADED_MODELS and InvokeAI_Max_Loaded_Models
  environment variables will both set `max_loaded_models`
  (see the lookup sketch after this list).

- Updated realesrgan to use the new config system.

- Updated textual_inversion_training to use the new config system.

- Discovered a race condition when InvokeAIAppConfig is created
  at module load time, which makes it impossible to customize
  or replace the help message produced with --help on the command
  line. To fix this, moved all instances of get_invokeai_config()
  from module load time to object initialization time (see the
  sketch after this list). This makes the code cleaner, too.

- Added a `--from_file` argument to `invokeai-node-cli` and changed
  the GitHub action to match (usage sketch below). CI tests will
  hopefully pass now.
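For illustration, a minimal, self-contained sketch of the case-insensitive lookup technique used in the config diff below; the helper name is made up for the example.

```python
import os
from typing import Optional

def lookup_env_case_insensitive(name: str) -> Optional[str]:
    """Return the value of environment variable `name`, ignoring case."""
    # Build an upper-cased copy of the environment, then match the
    # upper-cased key against it (the way Windows treats variable names).
    upcase_environ = {key.upper(): value for key, value in os.environ.items()}
    return upcase_environ.get(name.upper())

# Both spellings resolve to the same setting:
os.environ["InvokeAI_Max_Loaded_Models"] = "4"
assert lookup_env_case_insensitive("INVOKEAI_MAX_LOADED_MODELS") == "4"
```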
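As a rough before/after sketch of the --help fix: previously a module-level `config = get_invokeai_config()` froze argument parsing as a side effect of importing the module; moving the call into `__init__` defers it until the object is actually built. The class name and import path here are assumptions, not the real code.

```python
from invokeai.app.services.config import get_invokeai_config  # import path is an assumption

class EmbeddingTrainer:  # hypothetical class, standing in for e.g. textual inversion training
    def __init__(self):
        # Resolve the config when the object is constructed rather than at
        # module import time, so the CLI can still customize argument
        # parsing (and the --help text) before the singleton is created.
        self.config = get_invokeai_config()
```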
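And a small usage sketch of what `--from_file` does inside the CLI: it simply rebinds STDIN to the given file, as the first hunk below shows, so the existing command-reading loop needs no changes in CI. The loop body here is a stand-in, not the real dispatcher.

```python
import sys
from typing import Optional

def read_commands(from_file: Optional[str] = None) -> None:
    # With --from_file, rebind STDIN to the named file so that the
    # command-reading loop below works unchanged.
    if from_file:
        sys.stdin = open(from_file, "r")
    for line in sys.stdin:
        command = line.strip()
        if command:
            print(f"would run: {command}")  # stand-in for the real dispatch
```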
Lincoln Stein
2023-05-18 10:48:23 -04:00
parent f9710dd6ed
commit 7ea995149e
22 changed files with 118 additions and 116 deletions

View File

@@ -4,6 +4,7 @@ import argparse
import os
import re
import shlex
import sys
import time
from typing import (
Union,
@@ -195,6 +196,11 @@ def invoke_cli():
parser = config.get_parser()
parser.add_argument('commands',nargs='*')
invocation_commands = parser.parse_args().commands
# get the optional file to read commands from.
# Simplest is to use it for STDIN
if infile := config.from_file:
sys.stdin = open(infile,"r")
model_manager = get_model_manager(config,logger=logger)

View File

@@ -1,4 +1,4 @@
# Copyright (c) 2023 Lincoln Stein (https://github.com/lstein)
# Copyright (c) 2023 Lincoln Stein (https://github.com/lstein) and the InvokeAI Development Team
'''Invokeai configuration system.
@@ -206,8 +206,16 @@ class InvokeAISettings(BaseSettings):
if cls.initconf and settings_stanza in cls.initconf \
else OmegaConf.create()
# create an upcase version of the environment in
# order to achieve case-insensitive environment
# variables (the way Windows does)
upcase_environ = dict()
for key,value in os.environ.items():
upcase_environ[key.upper()] = value
fields = cls.__fields__
cls.argparse_groups = {}
for name, field in fields.items():
if name not in cls._excluded():
current_default = field.default
@@ -216,8 +224,8 @@ class InvokeAISettings(BaseSettings):
env_name = env_prefix + '_' + name
if category in initconf and name in initconf.get(category):
field.default = initconf.get(category).get(name)
if env_name in os.environ:
field.default = os.environ[env_name]
if env_name.upper() in upcase_environ:
field.default = upcase_environ[env_name.upper()]
cls.add_field_argument(parser, name, field)
field.default = current_default
@@ -353,6 +361,7 @@ setting environment variables INVOKEAI_<setting>.
legacy_conf_dir : Path = Field(default='configs/stable-diffusion', description='Path to directory of legacy checkpoint config files', category='Paths')
lora_dir : Path = Field(default='loras', description='Path to InvokeAI LoRA model directory', category='Paths')
outdir : Path = Field(default='outputs', description='Default folder for output images', category='Paths')
from_file : Path = Field(default=None, description='Take command input from the indicated file (command-line client only)', category='Paths')
model : str = Field(default='stable-diffusion-1.5', description='Initial model name', category='Models')
embeddings : bool = Field(default=True, description='Load contents of embeddings directory', category='Models')
@@ -502,11 +511,11 @@ class PagingArgumentParser(argparse.ArgumentParser):
text = self.format_help()
pydoc.pager(text)
def get_invokeai_config(cls:Type[InvokeAISettings]=InvokeAIAppConfig)->InvokeAISettings:
def get_invokeai_config(cls:Type[InvokeAISettings]=InvokeAIAppConfig,**kwargs)->InvokeAISettings:
'''
This returns a singleton InvokeAIAppConfig configuration object.
'''
global global_config
if global_config is None or type(global_config)!=cls:
global_config = cls()
global_config = cls(**kwargs)
return global_config
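As a usage note on the `**kwargs` passthrough above: whichever caller constructs the singleton first can now forward field overrides to the settings constructor. This is a hedged example; whether `max_loaded_models` is accepted as a keyword here is an assumption based on the commit message.

```python
# First caller constructs the singleton with overrides; later callers get
# the same object back unchanged.
config = get_invokeai_config(max_loaded_models=4)  # assumed field override
```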