refactor(config): simplify config

- Remove OmegaConf. It functioned as an intermediary data format between YAML/argparse and pydantic. It's not necessary - we can parse YAML or CLI args directly with pydantic, as sketched below.
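
  A minimal sketch of the new flow (the real `load_config_from_file` in the diff below is truncated, so everything past the `open()` call here is an assumption):

      import yaml
      from pathlib import Path

      from invokeai.app.services.config.config_default import InvokeAIAppConfig

      def load_config_from_file(config_path: Path) -> InvokeAIAppConfig:
          # Read the YAML and hand the raw dict straight to pydantic for validation
          with open(config_path) as file:
              loaded = yaml.safe_load(file)
          loaded.pop("meta", None)  # the `meta` stanza is not part of the config object (see below)
          return InvokeAIAppConfig.model_validate(loaded)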

- Remove dynamic CLI args. Only `root` is explicitly supported. This greatly simplifies config handling. Configuration is done by editing the YAML file; frequently-used args can be added back if there is demand.
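
  For example, the only supported invocation beyond `--help`/`--version` looks like:

      invokeai-web --root ~/invokeai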

- A separate arg parser is created to handle the slimmed-down CLI args. It's run immediately in the `invokeai-web` script to handle `--version` and `--help`. It is also used inside the singleton config getter (see below).
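
  A sketch of what that parser amounts to (the real one is `app_arg_parser` in `invokeai.frontend.cli.app_arg_parser`, imported in the diff below; the exact flag wiring here is an assumption):

      from argparse import ArgumentParser
      from pathlib import Path

      app_arg_parser = ArgumentParser()
      app_arg_parser.add_argument("--root", type=Path, default=None, help="Path to the runtime root directory")
      # argparse provides --help for free; --version prints the app version and exits
      app_arg_parser.add_argument("--version", action="version", version="(the installed InvokeAI version)")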

- Remove categories from the config. Our settings model is mostly flat. Handling categories adds complexity for both us and users - we have to transform the flat settings model into a categorized config (and back), while users have to be careful with the extra indentation in their YAML file. See the before/after below.
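
  Concretely, a v3 file like this (shape taken from the old module docstring in the diff below):

      InvokeAI:
        Web Server:
          host: 127.0.0.1
          port: 9090

  flattens to:

      host: 127.0.0.1
      port: 9090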

- Add a `meta` key to the config file. Currently, this holds the config schema version only. It is not a part of the config object itself.
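
  A written-out file then looks something like this (setting values illustrative):

      # Internal metadata
      meta:
        schema_version: 4

      # User settings
      host: 0.0.0.0
      port: 9090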

- Remove legacy settings that are no longer referenced, or were effectively no-op settings when referenced in code.

- Implement simple migration logic for v3 configs. If migration succeeds, the v3 config file is backed up to `invokeai.yaml.bak` and the new config is written to `invokeai.yaml`.
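
  A sketch of the migration's shape, assuming it flattens the old categories and renames moved keys (the actual function falls in the truncated part of the diff; names and key handling here are illustrative):

      import shutil
      from pathlib import Path

      import yaml

      from invokeai.app.services.config.config_default import InvokeAIAppConfig

      def migrate_v3_config_file(config_path: Path) -> InvokeAIAppConfig:
          with open(config_path) as file:
              loaded = yaml.safe_load(file)
          # v3 nests settings two levels deep: InvokeAI -> <category> -> <setting>
          flattened: dict = {}
          for category in loaded.get("InvokeAI", {}).values():
              flattened.update(category)
          if "outdir" in flattened:
              flattened["outputs_dir"] = flattened.pop("outdir")  # renamed in v4
          # drop legacy/no-op keys that no longer exist on the model
          parsed = {k: v for k, v in flattened.items() if k in InvokeAIAppConfig.model_fields}
          config = InvokeAIAppConfig.model_validate(parsed)
          shutil.copy(config_path, config_path.with_suffix(".yaml.bak"))  # invokeai.yaml -> invokeai.yaml.bak
          config.write_file(exclude_defaults=True)  # write the new v4 invokeai.yaml
          return config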

- Previously, the singleton config was accessed by calling `InvokeAIAppConfig.get_config()`. This returned an instance of `InvokeAIAppConfig`, which _also_ has a `get_config` method, creating a confusing situation where you weren't sure whether to call `get_config` again or just use the config object. That classmethod is replaced by a standalone `get_config` function which returns the singleton config object.
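
  Call sites change like so:

      # before: a classmethod on the settings class returned the singleton
      from invokeai.app.services.config import InvokeAIAppConfig
      config = InvokeAIAppConfig.get_config()

      # after: a standalone accessor returns it
      from invokeai.app.services.config import get_config
      config = get_config()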

- Fold CLI arg parsing (for `root`) and the loading/migration of `invokeai.yaml` into the new `get_config()` function.
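
  The new module imports `functools.lru_cache` and `app_arg_parser` (see the diff below), so the accessor plausibly looks something like this; its actual body falls in the truncated part of the diff, so treat the whole sketch as an assumption:

      from functools import lru_cache
      from pathlib import Path

      from invokeai.app.services.config.config_default import InvokeAIAppConfig, load_config_from_file
      from invokeai.frontend.cli.app_arg_parser import app_arg_parser

      @lru_cache(maxsize=1)
      def get_config() -> InvokeAIAppConfig:
          """Return the app's singleton config object, creating it on first access."""
          config = InvokeAIAppConfig()
          args, _ = app_arg_parser.parse_known_args()
          if args.root:
              config.set_root(Path(args.root))
          if config.init_file_path.exists():
              # may first migrate a v3 file, backing it up to invokeai.yaml.bak
              config.update_config(load_config_from_file(config.init_file_path))
          return config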

- Move `generate_config_docstrings` out into a standalone utility function.

- Make `root` a private attr (`_root`). This reduces the temptation to directly modify and/or use this sensitive field and ensures it is neither serialized nor read from input data. Use `root_path` to access the resolved root path, or `set_root` to set it.
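
  Usage then looks like:

      from pathlib import Path

      from invokeai.app.services.config import get_config

      config = get_config()
      config.set_root(Path("~/invokeai").expanduser())
      print(config.root_path)  # absolute, resolved; `_root` itself never appears in serialized output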
psychedelicious 2024-03-11 22:45:24 +11:00
parent 7387b0bdc9
commit 3fb116155b
9 changed files with 318 additions and 775 deletions

invokeai/app/services/config/__init__.py:

@@ -2,6 +2,6 @@

 from invokeai.app.services.config.config_common import PagingArgumentParser

-from .config_default import InvokeAIAppConfig, get_invokeai_config
+from .config_default import InvokeAIAppConfig, get_config

-__all__ = ["InvokeAIAppConfig", "get_invokeai_config", "PagingArgumentParser"]
+__all__ = ["InvokeAIAppConfig", "get_config", "PagingArgumentParser"]

invokeai/app/services/config/config_base.py (deleted):

@@ -1,241 +0,0 @@
# Copyright (c) 2023 Lincoln Stein (https://github.com/lstein) and the InvokeAI Development Team
"""
Base class for the InvokeAI configuration system.
It defines a type of pydantic BaseSettings object that
is able to read and write from an omegaconf-based config file,
with overriding of settings from environment variables and/or
the command line.
"""

from __future__ import annotations

import argparse
import os
import sys
from argparse import ArgumentParser
from pathlib import Path
from typing import Any, ClassVar, Dict, List, Literal, Optional, Union, get_args, get_origin, get_type_hints

from omegaconf import DictConfig, DictKeyType, ListConfig, OmegaConf
from pydantic import BaseModel
from pydantic_settings import BaseSettings, SettingsConfigDict

from invokeai.app.services.config.config_common import PagingArgumentParser, int_or_float_or_str


class InvokeAISettings(BaseSettings):
    """Runtime configuration settings in which default values are read from an omegaconf .yaml file."""

    initconf: ClassVar[Optional[DictConfig]] = None
    argparse_groups: ClassVar[Dict[str, Any]] = {}

    model_config = SettingsConfigDict(env_file_encoding="utf-8", arbitrary_types_allowed=True, case_sensitive=True)

    def parse_args(self, argv: Optional[List[str]] = sys.argv[1:]) -> None:
        """Call to parse command-line arguments."""
        parser = self.get_parser()
        opt, unknown_opts = parser.parse_known_args(argv)
        if len(unknown_opts) > 0:
            print("Unknown args:", unknown_opts)
        for name in self.model_fields:
            if name not in self._excluded():
                value = getattr(opt, name)
                if isinstance(value, ListConfig):
                    value = list(value)
                elif isinstance(value, DictConfig):
                    value = dict(value)
                setattr(self, name, value)

    def to_yaml(self) -> str:
        """Return a YAML string representing our settings. This can be used as the contents of `invokeai.yaml` to restore settings later."""
        cls = self.__class__
        type = get_args(get_type_hints(cls)["type"])[0]
        field_dict: Dict[str, Dict[str, Any]] = {type: {}}
        for name, field in self.model_fields.items():
            if name in cls._excluded_from_yaml():
                continue
            assert isinstance(field.json_schema_extra, dict)
            category = (
                field.json_schema_extra.get("category", "Uncategorized") if field.json_schema_extra else "Uncategorized"
            )
            value = getattr(self, name)
            assert isinstance(category, str)
            if category not in field_dict[type]:
                field_dict[type][category] = {}
            if isinstance(value, BaseModel):
                dump = value.model_dump(exclude_defaults=True, exclude_unset=True, exclude_none=True)
                field_dict[type][category][name] = dump
                continue
            if isinstance(value, list):
                if not value or len(value) == 0:
                    continue
                primitive = isinstance(value[0], get_args(DictKeyType))
                if not primitive:
                    val_list: List[Dict[str, Any]] = []
                    for list_val in value:
                        if isinstance(list_val, BaseModel):
                            dump = list_val.model_dump(exclude_defaults=True, exclude_unset=True, exclude_none=True)
                            val_list.append(dump)
                    field_dict[type][category][name] = val_list
                    continue
            # keep paths as strings to make it easier to read
            field_dict[type][category][name] = str(value) if isinstance(value, Path) else value
        conf = OmegaConf.create(field_dict)
        return OmegaConf.to_yaml(conf)

    @classmethod
    def add_parser_arguments(cls, parser: ArgumentParser) -> None:
        """Dynamically create arguments for a settings parser."""
        if "type" in get_type_hints(cls):
            settings_stanza = get_args(get_type_hints(cls)["type"])[0]
        else:
            settings_stanza = "Uncategorized"

        env_prefix = getattr(cls.model_config, "env_prefix", None)
        env_prefix = env_prefix if env_prefix is not None else settings_stanza.upper()

        initconf = (
            cls.initconf.get(settings_stanza)
            if cls.initconf and settings_stanza in cls.initconf
            else OmegaConf.create()
        )

        # create an upcase version of the environment in
        # order to achieve case-insensitive environment
        # variables (the way Windows does)
        upcase_environ = {}
        for key, value in os.environ.items():
            upcase_environ[key.upper()] = value

        fields = cls.model_fields
        cls.argparse_groups = {}

        for name, field in fields.items():
            if name not in cls._excluded():
                current_default = field.default

                category = (
                    field.json_schema_extra.get("category", "Uncategorized")
                    if field.json_schema_extra
                    else "Uncategorized"
                )
                env_name = env_prefix + "_" + name
                if category in initconf and name in initconf.get(category):
                    field.default = initconf.get(category).get(name)
                if env_name.upper() in upcase_environ:
                    field.default = upcase_environ[env_name.upper()]
                cls.add_field_argument(parser, name, field)

                field.default = current_default

    @classmethod
    def cmd_name(cls, command_field: str = "type") -> str:
        """Return the category of a setting."""
        hints = get_type_hints(cls)
        if command_field in hints:
            result: str = get_args(hints[command_field])[0]
            return result
        else:
            return "Uncategorized"

    @classmethod
    def get_parser(cls) -> ArgumentParser:
        """Get the command-line parser for a setting."""
        parser = PagingArgumentParser(
            prog=cls.cmd_name(),
            description=cls.__doc__,
        )
        cls.add_parser_arguments(parser)
        return parser

    @classmethod
    def _excluded(cls) -> List[str]:
        # internal fields that shouldn't be exposed as command line options
        return ["type", "initconf"]

    @classmethod
    def _excluded_from_yaml(cls) -> List[str]:
        # combination of deprecated parameters and internal ones that shouldn't be exposed as invokeai.yaml options
        return [
            "type",
            "initconf",
            "version",
            "from_file",
            "model",
            "root",
            "max_cache_size",
            "max_vram_cache_size",
            "always_use_cpu",
            "free_gpu_mem",
            "xformers_enabled",
            "tiled_decode",
            "lora_dir",
            "embedding_dir",
            "controlnet_dir",
            "conf_path",
        ]

    @classmethod
    def add_field_argument(cls, command_parser, name: str, field, default_override=None) -> None:
        """Add the argparse arguments for a setting parser."""
        field_type = get_type_hints(cls).get(name)
        default = (
            default_override
            if default_override is not None
            else field.default
            if field.default_factory is None
            else field.default_factory()
        )
        if category := (field.json_schema_extra.get("category", None) if field.json_schema_extra else None):
            if category not in cls.argparse_groups:
                cls.argparse_groups[category] = command_parser.add_argument_group(category)
            argparse_group = cls.argparse_groups[category]
        else:
            argparse_group = command_parser

        if get_origin(field_type) == Literal:
            allowed_values = get_args(field.annotation)
            allowed_types = set()
            for val in allowed_values:
                allowed_types.add(type(val))
            allowed_types_list = list(allowed_types)
            field_type = allowed_types_list[0] if len(allowed_types) == 1 else int_or_float_or_str

            argparse_group.add_argument(
                f"--{name}",
                dest=name,
                type=field_type,
                default=default,
                choices=allowed_values,
                help=field.description,
            )
        elif get_origin(field_type) == Union:
            argparse_group.add_argument(
                f"--{name}",
                dest=name,
                type=int_or_float_or_str,
                default=default,
                help=field.description,
            )
        elif get_origin(field_type) == list:
            argparse_group.add_argument(
                f"--{name}",
                dest=name,
                nargs="*",
                type=field.annotation,
                default=default,
                action=argparse.BooleanOptionalAction if field.annotation == bool else "store",
                help=field.description,
            )
        else:
            argparse_group.add_argument(
                f"--{name}",
                dest=name,
                type=field.annotation,
                default=default,
                action=argparse.BooleanOptionalAction if field.annotation == bool else "store",
                help=field.description,
            )

invokeai/app/services/config/config_common.py:

@@ -12,7 +12,6 @@ from __future__ import annotations

 import argparse
 import pydoc
-from typing import Union


 class PagingArgumentParser(argparse.ArgumentParser):
@@ -24,18 +23,3 @@ class PagingArgumentParser(argparse.ArgumentParser):
     def print_help(self, file=None) -> None:
         text = self.format_help()
         pydoc.pager(text)
-
-
-def int_or_float_or_str(value: str) -> Union[int, float, str]:
-    """
-    Workaround for argparse type checking.
-    """
-    try:
-        return int(value)
-    except Exception as e:  # noqa F841
-        pass
-    try:
-        return float(value)
-    except Exception as e:  # noqa F841
-        pass
-    return str(value)

View File

@ -1,187 +1,17 @@
# Copyright (c) 2023 Lincoln Stein (https://github.com/lstein) and the InvokeAI Development Team
"""Invokeai configuration system.
Arguments and fields are taken from the pydantic definition of the
model. Defaults can be set by creating a yaml configuration file that
has a top-level key of "InvokeAI" and subheadings for each of the
categories returned by `invokeai --help`. The file looks like this:
[file: invokeai.yaml]
InvokeAI:
Web Server:
host: 127.0.0.1
port: 9090
allow_origins: []
allow_credentials: true
allow_methods:
- '*'
allow_headers:
- '*'
Features:
esrgan: true
internet_available: true
log_tokenization: false
patchmatch: true
ignore_missing_core_models: false
Paths:
autoimport_dir: autoimport
lora_dir: null
embedding_dir: null
controlnet_dir: null
models_dir: models
legacy_conf_dir: configs/stable-diffusion
db_dir: databases
outdir: /home/lstein/invokeai-main/outputs
use_memory_db: false
Logging:
log_handlers:
- console
log_format: plain
log_level: info
Model Cache:
ram: 13.5
vram: 0.25
lazy_offload: true
log_memory_usage: false
Device:
device: auto
precision: auto
Generation:
sequential_guidance: false
attention_type: xformers
attention_slice_size: auto
force_tiled_decode: false
The default name of the configuration file is `invokeai.yaml`, located
in INVOKEAI_ROOT. You can replace supersede this by providing any
OmegaConf dictionary object initialization time:
omegaconf = OmegaConf.load('/tmp/init.yaml')
conf = InvokeAIAppConfig()
conf.parse_args(conf=omegaconf)
InvokeAIAppConfig.parse_args() will parse the contents of `sys.argv`
at initialization time. You may pass a list of strings in the optional
`argv` argument to use instead of the system argv:
conf.parse_args(argv=['--log_tokenization'])
It is also possible to set a value at initialization time. However, if
you call parse_args() it may be overwritten.
conf = InvokeAIAppConfig(log_tokenization=True)
conf.parse_args(argv=['--no-log_tokenization'])
conf.log_tokenization
# False
To avoid this, use `get_config()` to retrieve the application-wide
configuration object. This will retain any properties set at object
creation time:
conf = InvokeAIAppConfig.get_config(log_tokenization=True)
conf.parse_args(argv=['--no-log_tokenization'])
conf.log_tokenization
# True
Any setting can be overwritten by setting an environment variable of
form: "INVOKEAI_<setting>", as in:
export INVOKEAI_port=8080
Order of precedence (from highest):
1) initialization options
2) command line options
3) environment variable options
4) config file options
5) pydantic defaults
Typical usage at the top level file:
from invokeai.app.services.config import InvokeAIAppConfig
# get global configuration and print its cache size
conf = InvokeAIAppConfig.get_config()
conf.parse_args()
print(conf.ram_cache_size)
Typical usage in a backend module:
from invokeai.app.services.config import InvokeAIAppConfig
# get global configuration and print its cache size value
conf = InvokeAIAppConfig.get_config()
print(conf.ram_cache_size)
Computed properties:
The InvokeAIAppConfig object has a series of properties that
resolve paths relative to the runtime root directory. They each return
a Path object:
root_path - path to InvokeAI root
output_path - path to default outputs directory
conf - alias for the above
embedding_path - path to the embeddings directory
lora_path - path to the LoRA directory
In most cases, you will want to create a single InvokeAIAppConfig
object for the entire application. The InvokeAIAppConfig.get_config() function
does this:
config = InvokeAIAppConfig.get_config()
config.parse_args() # read values from the command line/config file
print(config.root)
# Subclassing
If you wish to create a similar class, please subclass the
`InvokeAISettings` class and define a Literal field named "type",
which is set to the desired top-level name. For example, to create a
"InvokeBatch" configuration, define like this:
class InvokeBatch(InvokeAISettings):
type: Literal["InvokeBatch"] = "InvokeBatch"
node_count : int = Field(default=1, description="Number of nodes to run on", json_schema_extra=dict(category='Resources'))
cpu_count : int = Field(default=8, description="Number of GPUs to run on per node", json_schema_extra=dict(category='Resources'))
This will now read and write from the "InvokeBatch" section of the
config file, look for environment variables named INVOKEBATCH_*, and
accept the command-line arguments `--node_count` and `--cpu_count`. The
two configs are kept in separate sections of the config file:
# invokeai.yaml
InvokeBatch:
Resources:
node_count: 1
cpu_count: 8
InvokeAI:
Paths:
root: /home/lstein/invokeai-main
legacy_conf_dir: configs/stable-diffusion
outdir: outputs
...
"""
from __future__ import annotations from __future__ import annotations
import os import os
import re import re
from functools import lru_cache
from pathlib import Path from pathlib import Path
from typing import Any, ClassVar, Dict, List, Literal, Optional from typing import Any, Literal, Optional
from omegaconf import DictConfig, OmegaConf import yaml
from pydantic import BaseModel, Field, field_validator from pydantic import BaseModel, Field, PrivateAttr, field_validator
from pydantic.config import JsonDict from pydantic_settings import BaseSettings, SettingsConfigDict
from pydantic_settings import SettingsConfigDict
from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS
from invokeai.frontend.cli.app_arg_parser import app_arg_parser
from .config_base import InvokeAISettings
INIT_FILE = Path("invokeai.yaml") INIT_FILE = Path("invokeai.yaml")
DB_FILE = Path("invokeai.db") DB_FILE = Path("invokeai.db")
@ -189,28 +19,16 @@ LEGACY_INIT_FILE = Path("invokeai.init")
DEFAULT_RAM_CACHE = 10.0 DEFAULT_RAM_CACHE = 10.0
DEFAULT_VRAM_CACHE = 0.25 DEFAULT_VRAM_CACHE = 0.25
DEFAULT_CONVERT_CACHE = 20.0 DEFAULT_CONVERT_CACHE = 20.0
DEVICE = Literal["auto", "cpu", "cuda", "cuda:1", "mps"]
PRECISION = Literal["auto", "float16", "bfloat16", "float32", "autocast"]
ATTENTION_TYPE = Literal["auto", "normal", "xformers", "sliced", "torch-sdp"]
ATTENTION_SLICE_SIZE = Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8]
LOG_FORMAT = Literal["plain", "color", "syslog", "legacy"]
LOG_LEVEL = Literal["debug", "info", "warning", "error", "critical"]
CONFIG_SCHEMA_VERSION = 4
class Categories(object): class URLRegexTokenPair(BaseModel):
"""Category headers for configuration variable groups."""
WebServer: JsonDict = {"category": "Web Server"}
Features: JsonDict = {"category": "Features"}
Paths: JsonDict = {"category": "Paths"}
Logging: JsonDict = {"category": "Logging"}
Development: JsonDict = {"category": "Development"}
CLIArgs: JsonDict = {"category": "CLIArgs"}
ModelInstall: JsonDict = {"category": "Model Install"}
ModelCache: JsonDict = {"category": "Model Cache"}
Device: JsonDict = {"category": "Device"}
Generation: JsonDict = {"category": "Generation"}
Queue: JsonDict = {"category": "Queue"}
Nodes: JsonDict = {"category": "Nodes"}
MemoryPerformance: JsonDict = {"category": "Memory/Performance"}
Deprecated: JsonDict = {"category": "Deprecated"}
class URLRegexToken(BaseModel):
url_regex: str = Field(description="Regular expression to match against the URL") url_regex: str = Field(description="Regular expression to match against the URL")
token: str = Field(description="Token to use when the URL matches the regex") token: str = Field(description="Token to use when the URL matches the regex")
@ -225,397 +43,364 @@ class URLRegexToken(BaseModel):
return v return v
class InvokeAIAppConfig(InvokeAISettings): class ConfigMeta(BaseModel):
"""Invoke App Configuration """Metadata for the config file. This is not stored in the config object."""
schema_version: int = CONFIG_SCHEMA_VERSION
class InvokeAIAppConfig(BaseSettings):
"""Invoke's global app configuration.
Typically, you won't need to interact with this class directly. Instead, use the `get_config` function from `invokeai.app.services.config` to get a singleton config object.
Attributes: Attributes:
host: **Web Server**: IP address to bind to. Use `0.0.0.0` to serve to your local network. host: IP address to bind to. Use `0.0.0.0` to serve to your local network.
port: **Web Server**: Port to bind to. port: Port to bind to.
allow_origins: **Web Server**: Allowed CORS origins. allow_origins: Allowed CORS origins.
allow_credentials: **Web Server**: Allow CORS credentials. allow_credentials: Allow CORS credentials.
allow_methods: **Web Server**: Methods allowed for CORS. allow_methods: Methods allowed for CORS.
allow_headers: **Web Server**: Headers allowed for CORS. allow_headers: Headers allowed for CORS.
ssl_certfile: **Web Server**: SSL certificate file for HTTPS. ssl_certfile: SSL certificate file for HTTPS. See https://www.uvicorn.org/settings/#https.
ssl_keyfile: **Web Server**: SSL key file for HTTPS. ssl_keyfile: SSL key file for HTTPS. See https://www.uvicorn.org/settings/#https.
esrgan: **Features**: Enables or disables the upscaling code. log_tokenization: Enable logging of parsed prompt tokens.
internet_available: **Features**: If true, attempt to download models on the fly; otherwise only use local models. patchmatch: Enable patchmatch inpaint code.
log_tokenization: **Features**: Enable logging of parsed prompt tokens. ignore_missing_core_models: Ignore missing core models on startup. If `True`, the app will attempt to download missing models on startup.
patchmatch: **Features**: Enable patchmatch inpaint code. autoimport_dir: Path to a directory of models files to be imported on startup. WARNING: This may be a relative path. Use `autoimport_path` to get the resolved absolute path.
ignore_missing_core_models: **Features**: Ignore missing core models on startup. If `True`, the app will attempt to download missing models on startup. models_dir: Path to the models directory. WARNING: This may be a relative path. Use `models_path` to get the resolved absolute path.
root: **Paths**: The InvokeAI runtime root directory. convert_cache_dir: Path to the converted models cache directory. When loading a non-diffusers model, it will be converted and store on disk at this location. WARNING: This may be a relative path. Use `convert_cache_path` to get the resolved absolute path.
autoimport_dir: **Paths**: Path to a directory of models files to be imported on startup. legacy_conf_dir: Path to directory of legacy checkpoint config files. WARNING: This may be a relative path. Use `legacy_conf_path` to get the resolved absolute path.
models_dir: **Paths**: Path to the models directory. db_dir: Path to InvokeAI databases directory. WARNING: This may be a relative path. Use `db_path` to get the resolved absolute path.
convert_cache_dir: **Paths**: Path to the converted models cache directory. When loading a non-diffusers model, it will be converted and store on disk at this location. outputs_dir: Path to directory for outputs. WARNING: This may be a relative path. Use `outputs_path` to get the resolved absolute path.
legacy_conf_dir: **Paths**: Path to directory of legacy checkpoint config files. custom_nodes_dir: Path to directory for custom nodes. WARNING: This may be a relative path. Use `custom_nodes_path` to get the resolved absolute path.
db_dir: **Paths**: Path to InvokeAI databases directory. log_handlers: Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>".
outdir: **Paths**: Path to directory for outputs. log_format: Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.
custom_nodes_dir: **Paths**: Path to directory for custom nodes. log_level: Emit logging messages at this level or higher.
from_file: **Paths**: Take command input from the indicated file (command-line client only). log_sql: Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose.
log_handlers: **Logging**: Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>". use_memory_db: Use in-memory database. Useful for development.
log_format: **Logging**: Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style. dev_reload: Automatically reload when Python sources are changed. Does not reload node definitions.
log_level: **Logging**: Emit logging messages at this level or higher. profile_graphs: Enable graph profiling using `cProfile`.
log_sql: **Logging**: Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose. profile_prefix: An optional prefix for profile output files.
use_memory_db: **Development**: Use in-memory database. Useful for development. profiles_dir: Path to profiles output directory. WARNING: This may be a relative path. Use `profiles_path` to get the resolved absolute path.
dev_reload: **Development**: Automatically reload when Python sources are changed. Does not reload node definitions. ram: Maximum memory amount used by memory model cache for rapid switching (GB).
profile_graphs: **Development**: Enable graph profiling using `cProfile`. vram: Amount of VRAM reserved for model storage (GB)
profile_prefix: **Development**: An optional prefix for profile output files. convert_cache: Maximum size of on-disk converted models cache (GB)
profiles_dir: **Development**: Path to profiles output directory. lazy_offload: Keep models in VRAM until their space is needed.
version: **CLIArgs**: CLI arg - show InvokeAI version and exit. log_memory_usage: If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.
hashing_algorithm: **Model Install**: Model hashing algorthim for model installs. 'blake3' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3. device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.
remote_api_tokens: **Model Install**: List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token. precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.
ram: **Model Cache**: Maximum memory amount used by memory model cache for rapid switching (GB). sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
vram: **Model Cache**: Amount of VRAM reserved for model storage (GB) attention_type: Attention type.
convert_cache: **Model Cache**: Maximum size of on-disk converted models cache (GB) attention_slice_size: Slice size, valid when attention_type=="sliced".
lazy_offload: **Model Cache**: Keep models in VRAM until their space is needed. force_tiled_decode: Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty).
log_memory_usage: **Model Cache**: If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour. png_compress_level: The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = no compression, 1 = fastest with slightly larger filesize, 9 = slowest with smallest filesize. 1 is typically the best setting.
device: **Device**: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities. max_queue_size: Maximum number of items in the session queue.
precision: **Device**: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system. allow_nodes: List of nodes to allow. Omit to allow all.
sequential_guidance: **Generation**: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements. deny_nodes: List of nodes to deny. Omit to deny none.
attention_type: **Generation**: Attention type. node_cache_size: How many cached nodes to keep in memory.
attention_slice_size: **Generation**: Slice size, valid when attention_type=="sliced". hashing_algorithm: Model hashing algorthim for model installs. 'blake3' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.
force_tiled_decode: **Generation**: Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty). remote_api_tokens: List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.
png_compress_level: **Generation**: The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = no compression, 1 = fastest with slightly larger filesize, 9 = slowest with smallest filesize. 1 is typically the best setting.
max_queue_size: **Queue**: Maximum number of items in the session queue.
allow_nodes: **Nodes**: List of nodes to allow. Omit to allow all.
deny_nodes: **Nodes**: List of nodes to deny. Omit to deny none.
node_cache_size: **Nodes**: How many cached nodes to keep in memory.
""" """
singleton_config: ClassVar[Optional[InvokeAIAppConfig]] = None _root: Optional[Path] = PrivateAttr(default=None)
singleton_init: ClassVar[Optional[Dict[str, Any]]] = None
# fmt: off # fmt: off
type: Literal["InvokeAI"] = "InvokeAI"
# WEB # WEB
host : str = Field(default="127.0.0.1", description="IP address to bind to. Use `0.0.0.0` to serve to your local network.", json_schema_extra=Categories.WebServer) host: str = Field(default="127.0.0.1", description="IP address to bind to. Use `0.0.0.0` to serve to your local network.")
port : int = Field(default=9090, description="Port to bind to.", json_schema_extra=Categories.WebServer) port: int = Field(default=9090, description="Port to bind to.")
allow_origins : List[str] = Field(default=[], description="Allowed CORS origins.", json_schema_extra=Categories.WebServer) allow_origins: list[str] = Field(default=[], description="Allowed CORS origins.")
allow_credentials : bool = Field(default=True, description="Allow CORS credentials.", json_schema_extra=Categories.WebServer) allow_credentials: bool = Field(default=True, description="Allow CORS credentials.")
allow_methods : List[str] = Field(default=["*"], description="Methods allowed for CORS.", json_schema_extra=Categories.WebServer) allow_methods: list[str] = Field(default=["*"], description="Methods allowed for CORS.")
allow_headers : List[str] = Field(default=["*"], description="Headers allowed for CORS.", json_schema_extra=Categories.WebServer) allow_headers: list[str] = Field(default=["*"], description="Headers allowed for CORS.")
# SSL options correspond to https://www.uvicorn.org/settings/#https ssl_certfile: Optional[Path] = Field(default=None, description="SSL certificate file for HTTPS. See https://www.uvicorn.org/settings/#https.")
ssl_certfile : Optional[Path] = Field(default=None, description="SSL certificate file for HTTPS.", json_schema_extra=Categories.WebServer) ssl_keyfile: Optional[Path] = Field(default=None, description="SSL key file for HTTPS. See https://www.uvicorn.org/settings/#https.")
ssl_keyfile : Optional[Path] = Field(default=None, description="SSL key file for HTTPS.", json_schema_extra=Categories.WebServer)
# FEATURES # MISC FEATURES
esrgan : bool = Field(default=True, description="Enables or disables the upscaling code.", json_schema_extra=Categories.Features) log_tokenization: bool = Field(default=False, description="Enable logging of parsed prompt tokens.")
# TODO(psyche): This is not used anywhere. patchmatch: bool = Field(default=True, description="Enable patchmatch inpaint code.")
internet_available : bool = Field(default=True, description="If true, attempt to download models on the fly; otherwise only use local models.", json_schema_extra=Categories.Features) ignore_missing_core_models: bool = Field(default=False, description="Ignore missing core models on startup. If `True`, the app will attempt to download missing models on startup.")
log_tokenization : bool = Field(default=False, description="Enable logging of parsed prompt tokens.", json_schema_extra=Categories.Features)
patchmatch : bool = Field(default=True, description="Enable patchmatch inpaint code.", json_schema_extra=Categories.Features)
ignore_missing_core_models : bool = Field(default=False, description='Ignore missing core models on startup. If `True`, the app will attempt to download missing models on startup.', json_schema_extra=Categories.Features)
# PATHS # PATHS
root : Optional[Path] = Field(default=None, description='The InvokeAI runtime root directory.', json_schema_extra=Categories.Paths) autoimport_dir: Path = Field(default=Path("autoimport"), description="Path to a directory of models files to be imported on startup. WARNING: This may be a relative path. Use `autoimport_path` to get the resolved absolute path.")
autoimport_dir : Path = Field(default=Path('autoimport'), description='Path to a directory of models files to be imported on startup.', json_schema_extra=Categories.Paths) models_dir: Path = Field(default=Path("models"), description="Path to the models directory. WARNING: This may be a relative path. Use `models_path` to get the resolved absolute path.")
models_dir : Path = Field(default=Path('models'), description='Path to the models directory.', json_schema_extra=Categories.Paths) convert_cache_dir: Path = Field(default=Path("models/.cache"), description="Path to the converted models cache directory. When loading a non-diffusers model, it will be converted and store on disk at this location. WARNING: This may be a relative path. Use `convert_cache_path` to get the resolved absolute path.")
convert_cache_dir : Path = Field(default=Path('models/.cache'), description='Path to the converted models cache directory. When loading a non-diffusers model, it will be converted and store on disk at this location.', json_schema_extra=Categories.Paths) legacy_conf_dir: Path = Field(default=Path("configs/stable-diffusion"), description="Path to directory of legacy checkpoint config files. WARNING: This may be a relative path. Use `legacy_conf_path` to get the resolved absolute path.")
legacy_conf_dir : Path = Field(default=Path('configs/stable-diffusion'), description='Path to directory of legacy checkpoint config files.', json_schema_extra=Categories.Paths) db_dir: Path = Field(default=Path("databases"), description="Path to InvokeAI databases directory. WARNING: This may be a relative path. Use `db_path` to get the resolved absolute path.")
db_dir : Path = Field(default=Path('databases'), description='Path to InvokeAI databases directory.', json_schema_extra=Categories.Paths) outputs_dir: Path = Field(default=Path("outputs"), description="Path to directory for outputs. WARNING: This may be a relative path. Use `outputs_path` to get the resolved absolute path.")
outdir : Path = Field(default=Path('outputs'), description='Path to directory for outputs.', json_schema_extra=Categories.Paths) custom_nodes_dir: Path = Field(default=Path("nodes"), description="Path to directory for custom nodes. WARNING: This may be a relative path. Use `custom_nodes_path` to get the resolved absolute path.")
custom_nodes_dir : Path = Field(default=Path('nodes'), description='Path to directory for custom nodes.', json_schema_extra=Categories.Paths)
# TODO(psyche): This is not used anywhere.
from_file : Optional[Path] = Field(default=None, description='Take command input from the indicated file (command-line client only).', json_schema_extra=Categories.Paths)
# LOGGING # LOGGING
log_handlers : List[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>".', json_schema_extra=Categories.Logging) log_handlers: list[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>".')
# note - would be better to read the log_format values from logging.py, but this creates circular dependencies issues # note - would be better to read the log_format values from logging.py, but this creates circular dependencies issues
log_format : Literal['plain', 'color', 'syslog', 'legacy'] = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.', json_schema_extra=Categories.Logging) log_format: LOG_FORMAT = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.')
log_level : Literal["debug", "info", "warning", "error", "critical"] = Field(default="info", description="Emit logging messages at this level or higher.", json_schema_extra=Categories.Logging) log_level: LOG_LEVEL = Field(default="info", description="Emit logging messages at this level or higher.")
log_sql : bool = Field(default=False, description="Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose.", json_schema_extra=Categories.Logging) log_sql: bool = Field(default=False, description="Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose.")
# Development # Development
use_memory_db : bool = Field(default=False, description='Use in-memory database. Useful for development.', json_schema_extra=Categories.Development) use_memory_db: bool = Field(default=False, description="Use in-memory database. Useful for development.")
dev_reload : bool = Field(default=False, description="Automatically reload when Python sources are changed. Does not reload node definitions.", json_schema_extra=Categories.Development) dev_reload: bool = Field(default=False, description="Automatically reload when Python sources are changed. Does not reload node definitions.")
profile_graphs : bool = Field(default=False, description="Enable graph profiling using `cProfile`.", json_schema_extra=Categories.Development) profile_graphs: bool = Field(default=False, description="Enable graph profiling using `cProfile`.")
profile_prefix : Optional[str] = Field(default=None, description="An optional prefix for profile output files.", json_schema_extra=Categories.Development) profile_prefix: Optional[str] = Field(default=None, description="An optional prefix for profile output files.")
profiles_dir : Path = Field(default=Path('profiles'), description="Path to profiles output directory.", json_schema_extra=Categories.Development) profiles_dir: Path = Field(default=Path("profiles"), description="Path to profiles output directory. WARNING: This may be a relative path. Use `profiles_path` to get the resolved absolute path.")
version : bool = Field(default=False, description="CLI arg - show InvokeAI version and exit.", json_schema_extra=Categories.CLIArgs)
# CACHE # CACHE
ram : float = Field(default=DEFAULT_RAM_CACHE, gt=0, description="Maximum memory amount used by memory model cache for rapid switching (GB).", json_schema_extra=Categories.ModelCache, ) ram: float = Field(default=DEFAULT_RAM_CACHE, gt=0, description="Maximum memory amount used by memory model cache for rapid switching (GB).")
vram : float = Field(default=DEFAULT_VRAM_CACHE, ge=0, description="Amount of VRAM reserved for model storage (GB)", json_schema_extra=Categories.ModelCache, ) vram: float = Field(default=DEFAULT_VRAM_CACHE, ge=0, description="Amount of VRAM reserved for model storage (GB)")
convert_cache : float = Field(default=DEFAULT_CONVERT_CACHE, ge=0, description="Maximum size of on-disk converted models cache (GB)", json_schema_extra=Categories.ModelCache) convert_cache: float = Field(default=DEFAULT_CONVERT_CACHE, ge=0, description="Maximum size of on-disk converted models cache (GB)")
lazy_offload: bool = Field(default=True, description="Keep models in VRAM until their space is needed.")
lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed.", json_schema_extra=Categories.ModelCache, ) log_memory_usage: bool = Field(default=False, description="If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.")
log_memory_usage : bool = Field(default=False, description="If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.", json_schema_extra=Categories.ModelCache)
# DEVICE # DEVICE
device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.", json_schema_extra=Categories.Device) device: DEVICE = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.")
precision : Literal["auto", "float16", "bfloat16", "float32", "autocast"] = Field(default="auto", description="Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.", json_schema_extra=Categories.Device) precision: PRECISION = Field(default="auto", description="Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.")
# GENERATION # GENERATION
sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.", json_schema_extra=Categories.Generation) sequential_guidance: bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.")
attention_type : Literal["auto", "normal", "xformers", "sliced", "torch-sdp"] = Field(default="auto", description="Attention type.", json_schema_extra=Categories.Generation) attention_type: ATTENTION_TYPE = Field(default="auto", description="Attention type.")
attention_slice_size: Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8] = Field(default="auto", description='Slice size, valid when attention_type=="sliced".', json_schema_extra=Categories.Generation) attention_slice_size: ATTENTION_SLICE_SIZE = Field(default="auto", description='Slice size, valid when attention_type=="sliced".')
force_tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty).", json_schema_extra=Categories.Generation) force_tiled_decode: bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty).")
png_compress_level : int = Field(default=1, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = no compression, 1 = fastest with slightly larger filesize, 9 = slowest with smallest filesize. 1 is typically the best setting.", json_schema_extra=Categories.Generation) png_compress_level: int = Field(default=1, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = no compression, 1 = fastest with slightly larger filesize, 9 = slowest with smallest filesize. 1 is typically the best setting.")
max_queue_size: int = Field(default=10000, gt=0, description="Maximum number of items in the session queue.")
# QUEUE
max_queue_size : int = Field(default=10000, gt=0, description="Maximum number of items in the session queue.", json_schema_extra=Categories.Queue)
# NODES # NODES
allow_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to allow. Omit to allow all.", json_schema_extra=Categories.Nodes) allow_nodes: Optional[list[str]] = Field(default=None, description="List of nodes to allow. Omit to allow all.")
deny_nodes : Optional[List[str]] = Field(default=None, description="List of nodes to deny. Omit to deny none.", json_schema_extra=Categories.Nodes) deny_nodes: Optional[list[str]] = Field(default=None, description="List of nodes to deny. Omit to deny none.")
node_cache_size : int = Field(default=512, description="How many cached nodes to keep in memory.", json_schema_extra=Categories.Nodes) node_cache_size: int = Field(default=512, description="How many cached nodes to keep in memory.")
# MODEL INSTALL # MODEL INSTALL
hashing_algorithm : HASHING_ALGORITHMS = Field(default="blake3", description="Model hashing algorthim for model installs. 'blake3' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.", json_schema_extra=Categories.ModelInstall) hashing_algorithm: HASHING_ALGORITHMS = Field(default="blake3", description="Model hashing algorthim for model installs. 'blake3' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.")
remote_api_tokens : Optional[list[URLRegexToken]] = Field( remote_api_tokens: Optional[list[URLRegexTokenPair]] = Field(default=None, description="List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.")
default=None,
description="List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided in as a Bearer token.",
json_schema_extra=Categories.ModelInstall
)
# TODO(psyche): Can we just remove these then?
# DEPRECATED FIELDS - STILL HERE IN ORDER TO OBTAN VALUES FROM PRE-3.1 CONFIG FILES
always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", json_schema_extra=Categories.Deprecated)
max_cache_size : Optional[float] = Field(default=None, gt=0, description="Maximum memory amount used by model cache for rapid switching", json_schema_extra=Categories.Deprecated)
max_vram_cache_size : Optional[float] = Field(default=None, ge=0, description="Amount of VRAM reserved for model storage", json_schema_extra=Categories.Deprecated)
xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", json_schema_extra=Categories.Deprecated)
tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", json_schema_extra=Categories.Deprecated)
lora_dir : Optional[Path] = Field(default=None, description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', json_schema_extra=Categories.Deprecated)
embedding_dir : Optional[Path] = Field(default=None, description='Path to a directory of Textual Inversion embeddings to be imported on startup.', json_schema_extra=Categories.Deprecated)
controlnet_dir : Optional[Path] = Field(default=None, description='Path to a directory of ControlNet embeddings to be imported on startup.', json_schema_extra=Categories.Deprecated)
conf_path : Path = Field(default=Path('configs/models.yaml'), description='Path to models definition file', json_schema_extra=Categories.Deprecated)
# this is not referred to in the source code and can be removed entirely
#free_gpu_mem : Optional[bool] = Field(default=None, description="If true, purge model from GPU after each generation.", json_schema_extra=Categories.MemoryPerformance)
# See InvokeAIAppConfig subclass below for CACHE and DEVICE categories
# fmt: on # fmt: on
model_config = SettingsConfigDict(validate_assignment=True, env_prefix="INVOKEAI") model_config = SettingsConfigDict(env_file_encoding="utf-8", case_sensitive=True, env_prefix="INVOKEAI")
def parse_args( def update_config(self, config: dict[str, Any] | InvokeAIAppConfig) -> None:
self, """Updates the config, overwriting existing values.
argv: Optional[list[str]] = None,
conf: Optional[DictConfig] = None, Args:
clobber: Optional[bool] = False, config: A dictionary of config settings, or instance of `InvokeAIAppConfig`. If an instance of \
) -> None: `InvokeAIAppConfig`, only the explicitly set fields will be merged into the singleton config.
""" """
Update settings with contents of init file, environment, and command-line settings.
:param conf: alternate Omegaconf dictionary object if isinstance(config, dict):
:param argv: aternate sys.argv list new_config = self.model_validate(config)
:param clobber: ovewrite any initialization parameters passed during initialization else:
new_config = config
for field_name in new_config.model_fields_set:
setattr(self, field_name, getattr(new_config, field_name))
def write_file(self, exclude_defaults: bool) -> None:
"""Write the current configuration to the `invokeai.yaml` file. This will overwrite the existing file.
A `meta` stanza is added to the top of the file, containing metadata about the config file. This is not stored in the config object.
Args:
exclude_defaults: If `True`, only include settings that were explicitly set. If `False`, include all settings, including defaults.
""" """
# Set the runtime root directory. We parse command-line switches here with open(self.init_file_path, "w") as file:
# in order to pick up the --root_dir option. meta_dict = {"meta": ConfigMeta().model_dump()}
super().parse_args(argv) config_dict = self.model_dump(mode="json", exclude_unset=True, exclude_defaults=exclude_defaults)
loaded_conf = None file.write("# Internal metadata\n")
if conf is None: file.write(yaml.dump(meta_dict, sort_keys=False))
try: file.write("\n")
loaded_conf = OmegaConf.load(self.root_dir / INIT_FILE) file.write("# User settings\n")
except Exception: file.write(yaml.dump(config_dict, sort_keys=False))
pass
if isinstance(loaded_conf, DictConfig):
InvokeAISettings.initconf = loaded_conf
else:
InvokeAISettings.initconf = conf
# parse args again in order to pick up settings in configuration file def set_root(self, root: Path) -> None:
super().parse_args(argv) """Set the runtime root directory. This is typically set using a CLI arg."""
self._root = root
if self.singleton_init and not clobber:
# When setting values in this way, set validate_assignment to true if you want to validate the value.
for k, v in self.singleton_init.items():
setattr(self, k, v)
@classmethod
def get_config(cls, **kwargs: Any) -> InvokeAIAppConfig:
"""Return a singleton InvokeAIAppConfig configuration object."""
if (
cls.singleton_config is None
or type(cls.singleton_config) is not cls
or (kwargs and cls.singleton_init != kwargs)
):
cls.singleton_config = cls(**kwargs)
cls.singleton_init = kwargs
return cls.singleton_config
@property
def root_path(self) -> Path:
"""Path to the runtime root directory."""
if self.root:
root = Path(self.root).expanduser().absolute()
else:
root = self.find_root().expanduser().absolute()
self.root = root # insulate ourselves from relative paths that may change
return root.resolve()
@property
def root_dir(self) -> Path:
"""Alias for above."""
return self.root_path
def _resolve(self, partial_path: Path) -> Path: def _resolve(self, partial_path: Path) -> Path:
return (self.root_path / partial_path).resolve() return (self.root_path / partial_path).resolve()
@property
def root_path(self) -> Path:
"""Path to the runtime root directory, resolved to an absolute path."""
if self._root:
root = Path(self._root).expanduser().absolute()
else:
root = self.find_root().expanduser().absolute()
self._root = root # insulate ourselves from relative paths that may change
return root.resolve()
@property @property
def init_file_path(self) -> Path: def init_file_path(self) -> Path:
"""Path to invokeai.yaml.""" """Path to invokeai.yaml, resolved to an absolute path.."""
resolved_path = self._resolve(INIT_FILE) resolved_path = self._resolve(INIT_FILE)
assert resolved_path is not None assert resolved_path is not None
return resolved_path return resolved_path
@property @property
def output_path(self) -> Optional[Path]: def autoimport_path(self) -> Path:
"""Path to defaults outputs directory.""" """Path to the autoimports directory, resolved to an absolute path.."""
return self._resolve(self.outdir) return self._resolve(self.autoimport_dir)
@property
def outputs_path(self) -> Optional[Path]:
"""Path to the outputs directory, resolved to an absolute path.."""
return self._resolve(self.outputs_dir)
@property @property
def db_path(self) -> Path: def db_path(self) -> Path:
"""Path to the invokeai.db file.""" """Path to the invokeai.db file, resolved to an absolute path.."""
db_dir = self._resolve(self.db_dir) db_dir = self._resolve(self.db_dir)
assert db_dir is not None assert db_dir is not None
return db_dir / DB_FILE return db_dir / DB_FILE
@property
def model_conf_path(self) -> Path:
"""Path to models configuration file."""
return self._resolve(self.conf_path)
@property @property
def legacy_conf_path(self) -> Path: def legacy_conf_path(self) -> Path:
"""Path to directory of legacy configuration files (e.g. v1-inference.yaml).""" """Path to directory of legacy configuration files (e.g. v1-inference.yaml), resolved to an absolute path.."""
return self._resolve(self.legacy_conf_dir) return self._resolve(self.legacy_conf_dir)
@property @property
def models_path(self) -> Path: def models_path(self) -> Path:
"""Path to the models directory.""" """Path to the models directory, resolved to an absolute path.."""
return self._resolve(self.models_dir) return self._resolve(self.models_dir)
@property @property
def models_convert_cache_path(self) -> Path: def convert_cache_path(self) -> Path:
"""Path to the converted cache models directory.""" """Path to the converted cache models directory, resolved to an absolute path.."""
return self._resolve(self.convert_cache_dir) return self._resolve(self.convert_cache_dir)
@property @property
def custom_nodes_path(self) -> Path: def custom_nodes_path(self) -> Path:
"""Path to the custom nodes directory.""" """Path to the custom nodes directory, resolved to an absolute path.."""
custom_nodes_path = self._resolve(self.custom_nodes_dir) custom_nodes_path = self._resolve(self.custom_nodes_dir)
assert custom_nodes_path is not None assert custom_nodes_path is not None
return custom_nodes_path return custom_nodes_path
-    # the following methods support legacy calls leftover from the Globals era
-    @property
-    def full_precision(self) -> bool:
-        """Return true if precision set to float32."""
-        return self.precision == "float32"
-
-    @property
-    def try_patchmatch(self) -> bool:
-        """Return true if patchmatch true."""
-        return self.patchmatch
-
-    @property
-    def nsfw_checker(self) -> bool:
-        """Return value for NSFW checker. The NSFW node is always active and disabled from Web UI."""
-        return True
-
-    @property
-    def invisible_watermark(self) -> bool:
-        """Return value of invisible watermark. It is always active and disabled from Web UI."""
-        return True
-
-    @property
-    def ram_cache_size(self) -> float:
-        """Return the ram cache size using the legacy or modern setting (GB)."""
-        return self.max_cache_size or self.ram
-
-    @property
-    def vram_cache_size(self) -> float:
-        """Return the vram cache size using the legacy or modern setting (GB)."""
-        return self.max_vram_cache_size or self.vram
-
-    @property
-    def convert_cache_size(self) -> float:
-        """Return the convert cache size on disk (GB)."""
-        return self.convert_cache
-
-    @property
-    def use_cpu(self) -> bool:
-        """Return true if the device is set to CPU or the always_use_cpu flag is set."""
-        return self.always_use_cpu or self.device == "cpu"
-
-    @property
-    def disable_xformers(self) -> bool:
-        """Return true if enable_xformers is false (reversed logic) and attention type is not set to xformers."""
-        disabled_in_config = not self.xformers_enabled
-        return disabled_in_config and self.attention_type != "xformers"
     @property
     def profiles_path(self) -> Path:
-        """Path to the graph profiles directory."""
+        """Path to the graph profiles directory, resolved to an absolute path.."""
         return self._resolve(self.profiles_dir)
     @staticmethod
     def find_root() -> Path:
         """Choose the runtime root directory when not specified on command line or init file."""
-        return _find_root()
-
-    @staticmethod
-    def generate_docstrings() -> str:
-        """Helper function for mkdocs. Generates a docstring for the InvokeAIAppConfig class.
-
-        You shouldn't run this manually. Instead, run `scripts/update-config-docstring.py` to update the docstring.
-        A makefile target is also available: `make update-config-docstring`.
-        See that script for more information about why this is necessary.
-        """
-        docstring = ' """Invoke App Configuration\n\n'
-        docstring += " Attributes:"
-        field_descriptions: dict[str, list[str]] = {}
-        for k, v in InvokeAIAppConfig.model_fields.items():
-            if not isinstance(v.json_schema_extra, dict):
-                # Should never happen
-                continue
-            category = v.json_schema_extra.get("category", None)
-            if not isinstance(category, str) or category == "Deprecated":
-                continue
-            if not field_descriptions.get(category):
-                field_descriptions[category] = []
-            field_descriptions[category].append(f" {k}: **{category}**: {v.description}")
-        for c in [
-            "Web Server",
-            "Features",
-            "Paths",
-            "Logging",
-            "Development",
-            "CLIArgs",
-            "Model Install",
-            "Model Cache",
-            "Device",
-            "Generation",
-            "Queue",
-            "Nodes",
-        ]:
-            docstring += "\n"
-            docstring += "\n".join(field_descriptions[c])
-        docstring += '\n """'
-        return docstring
-
-
-def get_invokeai_config(**kwargs: Any) -> InvokeAIAppConfig:
-    """Legacy function which returns InvokeAIAppConfig.get_config()."""
-    return InvokeAIAppConfig.get_config(**kwargs)
-
-
-def _find_root() -> Path:
-    venv = Path(os.environ.get("VIRTUAL_ENV") or ".")
-    if os.environ.get("INVOKEAI_ROOT"):
-        root = Path(os.environ["INVOKEAI_ROOT"])
-    elif any((venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE]):
-        root = (venv.parent).resolve()
-    else:
-        root = Path("~/invokeai").expanduser().resolve()
-    return root
+        venv = Path(os.environ.get("VIRTUAL_ENV") or ".")
+        if os.environ.get("INVOKEAI_ROOT"):
+            root = Path(os.environ["INVOKEAI_ROOT"])
+        elif any((venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE]):
+            root = (venv.parent).resolve()
+        else:
+            root = Path("~/invokeai").expanduser().resolve()
+        return root
+
+
+def generate_config_docstrings() -> str:
+    """Helper function for mkdocs. Generates a docstring for the InvokeAIAppConfig class.
+
+    You shouldn't run this manually. Instead, run `scripts/update-config-docstring.py` to update the docstring.
+    A makefile target is also available: `make update-config-docstring`.
+    See that script for more information about why this is necessary.
+    """
+    docstring = ' """Invoke\'s global app configuration.\n\n'
+    docstring += " Typically, you won't need to interact with this class directly. Instead, use the `get_config` function from `invokeai.app.services.config` to get a singleton config object.\n\n"
+    docstring += " Attributes:\n"
+    field_descriptions: list[str] = []
+    for k, v in InvokeAIAppConfig.model_fields.items():
+        if v.exclude:
+            continue
+        field_descriptions.append(f" {k}: {v.description}")
+    docstring += "\n".join(field_descriptions)
+    docstring += '\n """'
+    return docstring
+
+
+def load_config_from_file(config_path: Path) -> InvokeAIAppConfig:
+    """Parse a config file into an InvokeAIAppConfig object. The file should be in YAML format."""
+    assert config_path.suffix == ".yaml"
+    with open(config_path) as file:
+        loaded_config = InvokeAIAppConfig.model_validate(yaml.safe_load(file))
+    return loaded_config
+
+
+def migrate_v3_config_dict(config_dict: dict[str, Any]) -> InvokeAIAppConfig:
+    """Migrate a v3 config dictionary to the latest version.
+
+    Args:
+        config_dict: A dictionary of settings from a v3 config file.
+
+    Returns:
+        An instance of `InvokeAIAppConfig` with the migrated settings.
+    """
+    parsed_config_dict: dict[str, Any] = {}
+    for _category_name, category_dict in config_dict["InvokeAI"].items():
+        for k, v in category_dict.items():
+            if k == "outdir":
+                # `outdir` was renamed to `outputs_dir`
+                parsed_config_dict["outputs_dir"] = v
+            elif k in InvokeAIAppConfig.model_fields:
+                # skip unknown fields
+                parsed_config_dict[k] = v
+    return InvokeAIAppConfig.model_validate(parsed_config_dict)
+
+
+def load_and_migrate_config(config_path: Path) -> InvokeAIAppConfig:
+    """Load and migrate a config file to the latest version.
+
+    Args:
+        config_path: Path to the config file.
+
+    Returns:
+        An instance of `InvokeAIAppConfig` with the loaded and migrated settings.
+    """
+    assert config_path.suffix == ".yaml"
+    with open(config_path) as file:
+        loaded_config_dict = yaml.safe_load(file)
+
+    assert isinstance(loaded_config_dict, dict)
+
+    if "InvokeAI" in loaded_config_dict:
+        # This is a v3 config file, attempt to migrate it
+        try:
+            config = migrate_v3_config_dict(loaded_config_dict)
+        except Exception as e:
+            raise RuntimeError(f"Failed to load and migrate v3 config file {config_path}: {e}") from e
+        config_path.rename(config_path.with_suffix(".yaml.bak"))
+        # By excluding defaults, we ensure that the new config file only contains the settings that were explicitly set
+        config.write_file(exclude_defaults=True)
+        return config
+    else:
+        # This is a v4 config file, attempt to load it
+        try:
+            # Meta is not included in the model fields, so we need to validate it separately
+            config_meta = ConfigMeta.model_validate(loaded_config_dict.pop("meta"))
+            assert (
+                config_meta.schema_version == CONFIG_SCHEMA_VERSION
+            ), f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config_meta.schema_version}"
+            return InvokeAIAppConfig.model_validate(loaded_config_dict)
+        except Exception as e:
+            raise RuntimeError(f"Failed to load config file {config_path}: {e}") from e
+
+
+@lru_cache(maxsize=1)
+def get_config() -> InvokeAIAppConfig:
+    """Return the global singleton app config.
+
+    On first call, CLI args are parsed and the config file is read and merged in to the singleton config.
+
+    On subsequent calls, the singleton config is returned.
+    """
+    # Singleton app config
+    config = InvokeAIAppConfig()
+
+    # User may have specified a `root` as a CLI arg
+    opt = app_arg_parser.parse_args()
+    if root := getattr(opt, "root", None):
+        config.set_root(Path(root))
+    else:
+        config.set_root(config.find_root())
+
+    # Load the config file and merge it in to the app config
+    config_from_file = load_and_migrate_config(config.init_file_path)
+    config.update_config(config_from_file)
+
+    return config
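For reference, two short sketches of how the new functions are meant to be used; the paths and values are illustrative, and `some_removed_setting` is a hypothetical field name:

    from invokeai.app.services.config import get_config

    config = get_config()          # first call parses CLI args, then loads/migrates invokeai.yaml
    assert get_config() is config  # @lru_cache(maxsize=1) makes this a true singleton
    print(config.models_path)      # path properties resolve to absolute paths under the root

And the v3 migration, applied to an already-parsed config dict:

    from typing import Any

    from invokeai.app.services.config.config_default import migrate_v3_config_dict

    # A v3 invokeai.yaml parses into per-category dicts nested under "InvokeAI".
    v3_dict: dict[str, Any] = {
        "InvokeAI": {
            "Paths": {"outdir": "/data/invokeai/outputs"},
            "Features": {"some_removed_setting": True},  # unknown fields are silently dropped
        }
    }

    config = migrate_v3_config_dict(v3_dict)
    print(config.outputs_dir)  # `outdir` arrives under its new name, `outputs_dir`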


@@ -1,5 +0,0 @@
-"""
-Initialization file for invokeai.frontend.CLI
-"""
-
-from .CLI import main as invokeai_command_line_interface  # noqa: F401


@@ -0,0 +1,14 @@
+from argparse import ArgumentParser, RawTextHelpFormatter
+
+from invokeai.version import __version__
+
+root_help = r"""Sets a root directory for the app. If omitted, the app will search for the root directory in the following order:
+- The `$INVOKEAI_ROOT` environment variable
+- The currently active virtual environment's parent directory
+- `$HOME/invokeai`"""
+
+app_arg_parser = ArgumentParser(description="Invoke Studio", formatter_class=RawTextHelpFormatter)
+app_arg_parser.add_argument("--root", type=str, help=root_help)
+app_arg_parser.add_argument(
+    "--version", action="version", version=__version__, help="Displays the version and exits."
+)
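A quick sketch of the parser's behavior; the explicit argv list is illustrative (the app calls `parse_args()` with no arguments, which reads `sys.argv`):

    from invokeai.frontend.cli.app_arg_parser import app_arg_parser

    opt = app_arg_parser.parse_args(["--root", "/data/invokeai"])
    print(opt.root)  # /data/invokeai

    # `--version` uses argparse's built-in "version" action: it prints __version__
    # and raises SystemExit before any of the heavy app imports happen.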


@@ -5,10 +5,16 @@
 import logging
 import os
 
+from invokeai.frontend.cli.app_arg_parser import app_arg_parser
+
 logging.getLogger("xformers").addFilter(lambda record: "A matching Triton is not available" not in record.getMessage())
 
 
 def main():
+    # Parse CLI args immediately to handle `version` and `help` commands. Once the app starts up, we will parse the
+    # args again to get configuration args.
+    app_arg_parser.parse_args()
+
     # Change working directory to the repo root
     os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
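Because `get_config()` reuses the same `app_arg_parser` instance, the early parse here and the later parse inside the config getter always agree; a rough illustration with a made-up argv:

    import sys

    from invokeai.frontend.cli.app_arg_parser import app_arg_parser

    # Simulate `invokeai-web --root /data/invokeai`. argparse re-reads sys.argv on
    # each parse_args() call, so no state needs to be passed between the call sites.
    sys.argv = ["invokeai-web", "--root", "/data/invokeai"]

    app_arg_parser.parse_args()        # early parse (would exit for --help/--version)
    opt = app_arg_parser.parse_args()  # later parse, inside get_config()
    assert opt.root == "/data/invokeai"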


@@ -14,9 +14,9 @@ def main():
     # Change working directory to the repo root
     os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
 
-    from invokeai.app.services.config.config_default import InvokeAIAppConfig
+    from invokeai.app.services.config.config_default import generate_config_docstrings
 
-    docstring = InvokeAIAppConfig.generate_docstrings()
+    docstring = generate_config_docstrings()
 
     # Replace the docstring in the file
     with open("invokeai/app/services/config/config_default.py", "r") as f: