fixes to env parsing, textual inversion & help text

- Make environment variable settings case-insensitive: the
  INVOKEAI_MAX_LOADED_MODELS and InvokeAI_Max_Loaded_Models
  environment variables will both set `max_loaded_models`
  (see the first sketch below).

- Updated realesrgan to use the new config system.

- Updated textual_inversion_training to use the new config system.

- Discovered a race condition when InvokeAIAppConfig is created
  at module load time, which made it impossible to customize
  or replace the help message produced by --help on the command
  line. To fix this, moved all calls to get_invokeai_config()
  from module load time to object initialization time (the second
  sketch below condenses the pattern). Makes the code cleaner, too.

- Added a `--from_file` argument to `invokeai-node-cli` and changed
  the GitHub action to match (the third sketch below shows typical
  wiring). CI tests will hopefully pass now.
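
A minimal sketch of the case-insensitive matching, assuming the
pydantic v1 BaseSettings machinery that InvokeAIAppConfig wraps; the
class body below is illustrative, not the shipped config:

```python
from pydantic import BaseSettings

class InvokeAISettings(BaseSettings):
    max_loaded_models: int = 2  # illustrative field; the real config defines many more

    class Config:
        env_prefix = "INVOKEAI_"
        # With case_sensitive=False, pydantic lowercases environment
        # variable names before matching, so INVOKEAI_MAX_LOADED_MODELS
        # and InvokeAI_Max_Loaded_Models both populate max_loaded_models.
        case_sensitive = False
```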
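
The pattern behind the module-load fix, condensed from the diffs below:

```python
# Before: the config is resolved once, at import time. Anything that
# customizes or replaces the global config afterwards (such as a
# replacement --help message) is never seen by this module.
config = get_invokeai_config()  # module load time

class HuggingFaceConceptsLibrary(object):
    def __init__(self, root=None):
        self.root = root or config.root

# After: the config is resolved when the object is constructed, so it
# reflects whatever customization happened before that point.
class HuggingFaceConceptsLibrary(object):
    def __init__(self, root=None):
        self.config = get_invokeai_config()  # object initialization time
        self.root = root or self.config.root
```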
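
A sketch of how such a flag is typically wired up; the flag name comes
from this commit, but the parser and handler below are assumptions, not
the actual invokeai-node-cli code:

```python
import argparse
import sys

parser = argparse.ArgumentParser(description="InvokeAI node CLI")
parser.add_argument(
    "--from_file",
    type=str,
    default=None,
    help="read commands from a file instead of interactively",
)
args = parser.parse_args()

# Read commands from the named file when given, otherwise from stdin;
# this is what lets a CI job drive the CLI non-interactively.
source = open(args.from_file) if args.from_file else sys.stdin
for line in source:
    handle_command(line.rstrip())  # hypothetical command handler
```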
Lincoln Stein
2023-05-18 10:48:23 -04:00
parent f9710dd6ed
commit 7ea995149e
22 changed files with 118 additions and 116 deletions

View File

@@ -19,14 +19,14 @@ from huggingface_hub import (
 import invokeai.backend.util.logging as logger
 from invokeai.app.services.config import get_invokeai_config
-config = get_invokeai_config()
 class HuggingFaceConceptsLibrary(object):
     def __init__(self, root=None):
         """
         Initialize the Concepts object. May optionally pass a root directory.
         """
-        self.root = root or config.root
+        self.config = get_invokeai_config()
+        self.root = root or self.config.root
         self.hf_api = HfApi()
         self.local_concepts = dict()
         self.concept_list = None
@@ -58,7 +58,7 @@ class HuggingFaceConceptsLibrary(object):
                 self.concept_list.extend(list(local_concepts_to_add))
                 return self.concept_list
             return self.concept_list
-        elif config.internet_available is True:
+        elif self.config.internet_available is True:
             try:
                 models = self.hf_api.list_models(
                     filter=ModelFilter(model_name="sd-concepts-library/")

View File

@@ -43,8 +43,6 @@ from .diffusion import (
 from .offloading import FullyLoadedModelGroup, LazilyLoadedModelGroup, ModelGroup
 from .textual_inversion_manager import TextualInversionManager
-config = get_invokeai_config()
 @dataclass
 class PipelineIntermediateState:
     run_id: str
@@ -348,6 +346,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         """
         if xformers is available, use it, otherwise use sliced attention.
         """
+        config = get_invokeai_config()
         if (
             torch.cuda.is_available()
             and is_xformers_available()
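
The hunk above is truncated mid-conditional; a plausible completion,
using diffusers' standard attention toggles (the disable_xformers flag
and the else branch are assumptions, not necessarily the shipped code):

```python
        config = get_invokeai_config()
        if (
            torch.cuda.is_available()
            and is_xformers_available()
            and not config.disable_xformers  # assumed config flag
        ):
            # memory-efficient attention from the xformers package
            self.enable_xformers_memory_efficient_attention()
        else:
            # fall back to diffusers' built-in sliced attention
            self.enable_attention_slicing()
```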

View File

@@ -32,8 +32,6 @@ ModelForwardCallback: TypeAlias = Union[
     Callable[[torch.Tensor, torch.Tensor, torch.Tensor], torch.Tensor],
 ]
-config = get_invokeai_config()
 @dataclass(frozen=True)
 class PostprocessingSettings:
     threshold: float
@@ -74,6 +72,7 @@ class InvokeAIDiffuserComponent:
         :param model: the unet model to pass through to cross attention control
         :param model_forward_callback: a lambda with arguments (x, sigma, conditioning_to_apply). will be called repeatedly. most likely, this should simply call model.forward(x, sigma, conditioning)
         """
+        config = get_invokeai_config()
         self.conditioning = None
         self.model = model
         self.is_running_diffusers = is_running_diffusers