2024-03-15 10:35:37 +00:00
# TODO(psyche): pydantic-settings supports YAML settings sources. If we can figure out a way to integrate the YAML
# migration logic, we could use that for simpler config loading.
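# A hedged sketch of what that could look like (assumes pydantic-settings >= 2.2, where
# YamlConfigSettingsSource is available; the v3/v4 migration would still have to run first):
#
#   from pydantic_settings import YamlConfigSettingsSource
#
#   @classmethod
#   def settings_customise_sources(cls, settings_cls, init_settings, env_settings, dotenv_settings, file_secret_settings):
#       return (init_settings, env_settings, YamlConfigSettingsSource(settings_cls, yaml_file="invokeai.yaml"))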
2023-05-26 00:41:26 +00:00
from __future__ import annotations
2023-08-18 01:48:04 +00:00
fix: use locale encoding
We have had a few bugs with v4 related to file encodings, especially on Windows.
Windows uses its own character encodings instead of `utf-8`, often `cp1252`. Some characters cannot be decoded using `utf-8`, causing `UnicodeDecodeError`.
There are a couple places where this can cause problems:
- In the installer bootstrap, we install or upgrade `pip` and decode the result, using `subprocess`.
The input to this includes the user's home dir. In #6105, the user had one of the problematic characters in their username. `subprocess` attempts and fails to decode the username, which crashes the installer.
To fix this, we need to use `locale.getpreferredencoding()` when executing the command.
- Similarly, in the model install service and config class, we attempt to load a yaml config file. If a problematic character is in the path to the file (which often includes the user's home dir), we can get the same error.
One example is #6129 in which the models.yaml migration fails.
To fix this, we need to open the file with `locale.getpreferredencoding()`.
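For illustration, a minimal sketch of the locale-aware pattern described above (the command and path are examples only):

    import locale
    import subprocess

    encoding = locale.getpreferredencoding()

    # Decode subprocess output with the system encoding rather than assuming utf-8
    result = subprocess.run(["pip", "--version"], capture_output=True)
    print(result.stdout.decode(encoding))

    # Open a YAML config file with the same encoding
    with open("invokeai.yaml", "rt", encoding=encoding) as f:
        text = f.read()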
2024-04-04 03:59:20 +00:00
import locale
2023-05-04 05:20:30 +00:00
import os
2024-03-08 02:32:26 +00:00
import re
2024-03-15 12:21:21 +00:00
import shutil
2024-03-11 11:45:24 +00:00
from functools import lru_cache
2023-05-04 05:20:30 +00:00
from pathlib import Path
2024-03-15 10:50:42 +00:00
from typing import Any, Literal, Optional
2023-08-23 01:15:59 +00:00
2024-03-19 01:41:42 +00:00
import psutil
2024-03-11 11:45:24 +00:00
import yaml
from pydantic import BaseModel, Field, PrivateAttr, field_validator
2024-03-21 00:55:49 +00:00
from pydantic_settings import BaseSettings, PydanticBaseSettingsSource, SettingsConfigDict
2023-08-18 01:48:04 +00:00
2024-03-19 02:18:54 +00:00
import invokeai.configs as model_configs
2024-03-13 22:44:55 +00:00
from invokeai.backend.model_hash.model_hash import HASHING_ALGORITHMS
feat: single app entrypoint with CLI arg parsing
We have two problems with how argparse is being utilized:
- We parse CLI args as the `api_app.py` file is read. This causes a problem with pytest, which has an incompatible set of CLI args. Some tests import the FastAPI app, which triggers the config to parse CLI args, which receives the pytest args and fails.
- We've repeatedly had problems when something that uses the config is imported before the CLI args are parsed. When this happens, the root dir may not be set correctly, so we attempt to operate on incorrect paths.
To resolve these issues, we need to lift CLI arg parsing outside of the application code, but still let the application access the CLI args. We can create an external app entrypoint to do this.
- `InvokeAIArgs` is a simple helper class that parses CLI args and stores the result.
- `run_app()` is the new entrypoint. It first parses CLI args, then runs `invoke_api` to start the app.
The `invokeai-web` project script and `invokeai-web.py` dev script now call `run_app()` instead of `invoke_api()`.
The first time `get_config()` is called to get the singleton config object, it retrieves the args from `InvokeAIArgs`, sets the root dir if provided, then merges settings in from `invokeai.yaml`.
CLI arg parsing is now safely insulated from application code, but still accessible. And we don't need to worry about import order having an impact on anything, because by the time the app is running, we have already parsed CLI args. Whew!
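As a rough sketch of the pattern (argument names and the `invoke_api` import path are illustrative):

    from argparse import ArgumentParser, Namespace
    from typing import Optional

    class InvokeAIArgs:
        """Parses CLI args once, at the entrypoint, and stores the result."""

        args: Optional[Namespace] = None

        @staticmethod
        def parse_args() -> None:
            parser = ArgumentParser(prog="invokeai-web")
            parser.add_argument("--root", type=str, default=None)
            InvokeAIArgs.args = parser.parse_args()

    def run_app() -> None:
        InvokeAIArgs.parse_args()  # parse before any application code runs
        from invokeai.app.api_app import invoke_api  # deferred import, path assumed

        invoke_api()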
2024-03-15 05:33:52 +00:00
from invokeai.frontend.cli.arg_parser import InvokeAIArgs
2023-05-04 05:20:30 +00:00
INIT_FILE = Path("invokeai.yaml")
2023-06-04 00:24:41 +00:00
DB_FILE = Path("invokeai.db")
2023-05-04 05:20:30 +00:00
LEGACY_INIT_FILE = Path("invokeai.init")
2024-02-09 21:42:33 +00:00
DEFAULT_RAM_CACHE = 10.0
DEFAULT_VRAM_CACHE = 0.25
DEFAULT_CONVERT_CACHE = 20.0
2024-03-11 11:45:24 +00:00
DEVICE = Literal["auto", "cpu", "cuda", "cuda:1", "mps"]
2024-04-15 13:12:49 +00:00
PRECISION = Literal["auto", "float16", "bfloat16", "float32"]
2024-03-11 11:45:24 +00:00
ATTENTION_TYPE = Literal["auto", "normal", "xformers", "sliced", "torch-sdp"]
ATTENTION_SLICE_SIZE = Literal["auto", "balanced", "max", 1, 2, 3, 4, 5, 6, 7, 8]
LOG_FORMAT = Literal["plain", "color", "syslog", "legacy"]
LOG_LEVEL = Literal["debug", "info", "warning", "error", "critical"]
2024-04-15 13:12:49 +00:00
CONFIG_SCHEMA_VERSION = "4.0.1"
2023-08-02 13:44:06 +00:00
2023-08-02 18:28:19 +00:00
2024-03-19 01:41:42 +00:00
def get_default_ram_cache_size() -> float:
    """Run a heuristic for the default RAM cache based on installed RAM."""

    # On some machines, psutil.virtual_memory().total gives a value that is slightly less than the actual RAM, so the
    # limits are set slightly lower than what we expect the actual RAM to be.

    GB = 1024**3
    max_ram = psutil.virtual_memory().total / GB

    if max_ram >= 60:
        return 15.0
    if max_ram >= 30:
        return 7.5
    if max_ram >= 14:
        return 4.0
    return 2.1  # 2.1 is just large enough for sd 1.5 ;-)
2024-03-11 11:45:24 +00:00
class URLRegexTokenPair(BaseModel):
2024-03-08 02:32:26 +00:00
    url_regex: str = Field(description="Regular expression to match against the URL")
    token: str = Field(description="Token to use when the URL matches the regex")

    @field_validator("url_regex")
    @classmethod
    def validate_url_regex(cls, v: str) -> str:
        """Validate that the value is a valid regex."""
        try:
            re.compile(v)
        except re.error as e:
            raise ValueError(f"Invalid regex: {e}")
        return v
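
# For illustration (values hypothetical), an invokeai.yaml snippet that a list of these
# models would validate:
#   remote_api_tokens:
#     - url_regex: "civitai.com"
#       token: "my-secret-token"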
2024-03-11 11:45:24 +00:00
class InvokeAIAppConfig(BaseSettings):
    """Invoke's global app configuration.

    Typically, you won't need to interact with this class directly. Instead, use the `get_config` function from `invokeai.app.services.config` to get a singleton config object.
2024-03-08 00:54:35 +00:00

    Attributes:
2024-03-11 11:45:24 +00:00
        host: IP address to bind to. Use `0.0.0.0` to serve to your local network.
        port: Port to bind to.
        allow_origins: Allowed CORS origins.
        allow_credentials: Allow CORS credentials.
        allow_methods: Methods allowed for CORS.
        allow_headers: Headers allowed for CORS.
        ssl_certfile: SSL certificate file for HTTPS. See https://www.uvicorn.org/settings/#https.
        ssl_keyfile: SSL key file for HTTPS. See https://www.uvicorn.org/settings/#https.
        log_tokenization: Enable logging of parsed prompt tokens.
        patchmatch: Enable patchmatch inpaint code.
2024-03-15 08:15:49 +00:00
        models_dir: Path to the models directory.
        convert_cache_dir: Path to the converted models cache directory. When loading a non-diffusers model, it will be converted and stored on disk at this location.
        legacy_conf_dir: Path to directory of legacy checkpoint config files.
        db_dir: Path to InvokeAI databases directory.
        outputs_dir: Path to directory for outputs.
        custom_nodes_dir: Path to directory for custom nodes.
2024-03-11 11:45:24 +00:00
        log_handlers: Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>".
2024-03-15 08:15:49 +00:00
        log_format: Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.<br>Valid values: `plain`, `color`, `syslog`, `legacy`
        log_level: Emit logging messages at this level or higher.<br>Valid values: `debug`, `info`, `warning`, `error`, `critical`
2024-03-11 11:45:24 +00:00
        log_sql: Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose.
        use_memory_db: Use in-memory database. Useful for development.
        dev_reload: Automatically reload when Python sources are changed. Does not reload node definitions.
        profile_graphs: Enable graph profiling using `cProfile`.
        profile_prefix: An optional prefix for profile output files.
2024-03-15 08:15:49 +00:00
        profiles_dir: Path to profiles output directory.
2024-03-11 11:45:24 +00:00
        ram: Maximum memory amount used by memory model cache for rapid switching (GB).
2024-03-15 08:15:49 +00:00
        vram: Amount of VRAM reserved for model storage (GB).
        convert_cache: Maximum size of on-disk converted models cache (GB).
2024-03-11 11:45:24 +00:00
        lazy_offload: Keep models in VRAM until their space is needed.
        log_memory_usage: If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.
2024-03-15 08:15:49 +00:00
        device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
2024-04-15 13:12:49 +00:00
        precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`
2024-03-11 11:45:24 +00:00
        sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
2024-03-15 08:15:49 +00:00
        attention_type: Attention type.<br>Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`
        attention_slice_size: Slice size, valid when attention_type=="sliced".<br>Valid values: `auto`, `balanced`, `max`, `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8`
2024-03-11 11:45:24 +00:00
        force_tiled_decode: Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty).
2024-03-11 14:10:48 +00:00
        pil_compress_level: The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = no compression, 1 = fastest with slightly larger filesize, 9 = slowest with smallest filesize. 1 is typically the best setting.
2024-03-11 11:45:24 +00:00
        max_queue_size: Maximum number of items in the session queue.
        allow_nodes: List of nodes to allow. Omit to allow all.
        deny_nodes: List of nodes to deny. Omit to deny none.
        node_cache_size: How many cached nodes to keep in memory.
2024-03-21 06:43:13 +00:00
        hashing_algorithm: Model hashing algorithm for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.<br>Valid values: `blake3_multi`, `blake3_single`, `random`, `md5`, `sha1`, `sha224`, `sha256`, `sha384`, `sha512`, `blake2b`, `blake2s`, `sha3_224`, `sha3_256`, `sha3_384`, `sha3_512`, `shake_128`, `shake_256`
2024-03-11 11:45:24 +00:00
        remote_api_tokens: List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided as a Bearer token.
2024-03-27 04:24:18 +00:00
        scan_models_on_startup: Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.
2024-03-08 00:54:35 +00:00
    """
2023-07-27 14:54:01 +00:00
2024-03-11 11:45:24 +00:00
    _root: Optional[Path] = PrivateAttr(default=None)
2024-03-19 05:24:13 +00:00
    _config_file: Optional[Path] = PrivateAttr(default=None)
2023-07-04 21:05:35 +00:00
2023-05-04 05:20:30 +00:00
    # fmt: off
2023-08-17 17:47:26 +00:00
2024-03-15 12:21:21 +00:00
    # INTERNAL
2024-03-19 10:28:07 +00:00
    schema_version: str = Field(default=CONFIG_SCHEMA_VERSION, description="Schema version of the config file. This is not a user-configurable setting.")
2024-03-19 09:24:02 +00:00
    # This is only used during v3 models.yaml migration
2024-03-15 12:21:21 +00:00
    legacy_models_yaml_path: Optional[Path] = Field(default=None, description="Path to the legacy models.yaml file. This is not a user-configurable setting.")
2023-08-17 17:47:26 +00:00
    # WEB
2024-03-11 11:45:24 +00:00
    host: str = Field(default="127.0.0.1", description="IP address to bind to. Use `0.0.0.0` to serve to your local network.")
    port: int = Field(default=9090, description="Port to bind to.")
    allow_origins: list[str] = Field(default=[], description="Allowed CORS origins.")
    allow_credentials: bool = Field(default=True, description="Allow CORS credentials.")
    allow_methods: list[str] = Field(default=["*"], description="Methods allowed for CORS.")
    allow_headers: list[str] = Field(default=["*"], description="Headers allowed for CORS.")
    ssl_certfile: Optional[Path] = Field(default=None, description="SSL certificate file for HTTPS. See https://www.uvicorn.org/settings/#https.")
    ssl_keyfile: Optional[Path] = Field(default=None, description="SSL key file for HTTPS. See https://www.uvicorn.org/settings/#https.")

    # MISC FEATURES
    log_tokenization: bool = Field(default=False, description="Enable logging of parsed prompt tokens.")
    patchmatch: bool = Field(default=True, description="Enable patchmatch inpaint code.")
2023-05-17 19:22:58 +00:00
2023-08-17 17:47:26 +00:00
    # PATHS
2024-03-15 08:15:49 +00:00
    models_dir: Path = Field(default=Path("models"), description="Path to the models directory.")
    convert_cache_dir: Path = Field(default=Path("models/.cache"), description="Path to the converted models cache directory. When loading a non-diffusers model, it will be converted and stored on disk at this location.")
2024-03-19 01:44:35 +00:00
    legacy_conf_dir: Path = Field(default=Path("configs"), description="Path to directory of legacy checkpoint config files.")
2024-03-15 08:15:49 +00:00
    db_dir: Path = Field(default=Path("databases"), description="Path to InvokeAI databases directory.")
    outputs_dir: Path = Field(default=Path("outputs"), description="Path to directory for outputs.")
    custom_nodes_dir: Path = Field(default=Path("nodes"), description="Path to directory for custom nodes.")
2023-07-04 21:05:35 +00:00
2023-08-17 17:47:26 +00:00
    # LOGGING
2024-03-11 11:45:24 +00:00
    log_handlers: list[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>".')
2023-05-25 03:57:15 +00:00
    # note - would be better to read the log_format values from logging.py, but this creates circular dependency issues
2024-03-11 11:45:24 +00:00
    log_format: LOG_FORMAT = Field(default="color", description='Log format. Use "plain" for text-only, "color" for colorized output, "legacy" for 2.3-style logging and "syslog" for syslog-style.')
    log_level: LOG_LEVEL = Field(default="info", description="Emit logging messages at this level or higher.")
    log_sql: bool = Field(default=False, description="Log SQL queries. `log_level` must be `debug` for this to do anything. Extremely verbose.")
2023-07-08 00:47:29 +00:00
2024-01-31 10:51:57 +00:00
    # Development
2024-03-11 11:45:24 +00:00
    use_memory_db: bool = Field(default=False, description="Use in-memory database. Useful for development.")
    dev_reload: bool = Field(default=False, description="Automatically reload when Python sources are changed. Does not reload node definitions.")
    profile_graphs: bool = Field(default=False, description="Enable graph profiling using `cProfile`.")
    profile_prefix: Optional[str] = Field(default=None, description="An optional prefix for profile output files.")
2024-03-15 08:15:49 +00:00
    profiles_dir: Path = Field(default=Path("profiles"), description="Path to profiles output directory.")
2023-08-17 17:47:26 +00:00
    # CACHE
2024-03-19 01:41:42 +00:00
    ram: float = Field(default_factory=get_default_ram_cache_size, gt=0, description="Maximum memory amount used by memory model cache for rapid switching (GB).")
2024-03-15 08:15:49 +00:00
    vram: float = Field(default=DEFAULT_VRAM_CACHE, ge=0, description="Amount of VRAM reserved for model storage (GB).")
    convert_cache: float = Field(default=DEFAULT_CONVERT_CACHE, ge=0, description="Maximum size of on-disk converted models cache (GB).")
2024-03-11 11:45:24 +00:00
    lazy_offload: bool = Field(default=True, description="Keep models in VRAM until their space is needed.")
    log_memory_usage: bool = Field(default=False, description="If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.")
2023-08-17 17:47:26 +00:00
    # DEVICE
2024-03-11 11:45:24 +00:00
    device: DEVICE = Field(default="auto", description="Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.")
    precision: PRECISION = Field(default="auto", description="Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.")
2023-08-17 17:47:26 +00:00
    # GENERATION
2024-03-11 11:45:24 +00:00
    sequential_guidance: bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.")
    attention_type: ATTENTION_TYPE = Field(default="auto", description="Attention type.")
    attention_slice_size: ATTENTION_SLICE_SIZE = Field(default="auto", description='Slice size, valid when attention_type=="sliced".')
    force_tiled_decode: bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty).")
2024-03-11 14:10:48 +00:00
    pil_compress_level: int = Field(default=1, description="The compress_level setting of PIL.Image.save(), used for PNG encoding. All settings are lossless. 0 = no compression, 1 = fastest with slightly larger filesize, 9 = slowest with smallest filesize. 1 is typically the best setting.")
2024-03-11 11:45:24 +00:00
    max_queue_size: int = Field(default=10000, gt=0, description="Maximum number of items in the session queue.")
feat: queued generation (#4502)
* fix(config): fix typing issues in `config/`
`config/invokeai_config.py`:
- use `Optional` for things that are optional
- fix typing of `ram_cache_size()` and `vram_cache_size()`
- remove unused and incorrectly typed method `autoconvert_path`
- fix types and logic for `parse_args()`, in which `InvokeAIAppConfig.initconf` *must* be a `DictConfig`, but function would allow it to be set as a `ListConfig`, which presumably would cause issues elsewhere
`config/base.py`:
- use `cls` for first arg of class methods
- use `Optional` for things that are optional
- fix minor type issue related to setting of `env_prefix`
- remove unused `add_subparser()` method, which calls `add_parser()` on an `ArgumentParser` (method only available on the `_SubParsersAction` object, which is returned from `ArgumentParser.add_subparsers()`)
* feat: queued generation and batches
Due to a very messy branch with broad addition of `isort` on `main` alongside it, some git surgery was needed to get an agreeable git history. This commit represents all of the work on queued generation. See PR for notes.
* chore: flake8, isort, black
* fix(nodes): fix incorrect service stop() method
* fix(nodes): improve names of a few variables
* fix(tests): fix up tests after changes to batches/queue
* feat(tests): add unit tests for session queue helper functions
* feat(ui): dynamic prompts is always enabled
* feat(queue): add queue_status_changed event
* feat(ui): wip queue graphs
* feat(nodes): move cleanup til after invoker startup
* feat(nodes): add cancel_by_batch_ids
* feat(ui): wip batch graphs & UI
* fix(nodes): remove `Batch.batch_id` from required
* fix(ui): cleanup and use fixedCacheKey for all mutations
* fix(ui): remove orphaned nodes from canvas graphs
* fix(nodes): fix cancel_by_batch_ids result count
* fix(ui): only show cancel batch tooltip when batches were canceled
* chore: isort
* fix(api): return `[""]` when dynamic prompts generates no prompts
Just a simple fallback so we always have a prompt.
* feat(ui): dynamicPrompts.combinatorial is always on
There seems to be little purpose in using the combinatorial generation for dynamic prompts. I've disabled it by hiding it from the UI and defaulting combinatorial to true. If we want to enable it again in the future it's straightforward to do so.
* feat: add queue_id & support logic
* feat(ui): fix upscale button
It prepends the upscale operation to the queue
* feat(nodes): return queue item when enqueuing a single graph
This facilitates one-off graph async workflows in the client.
* feat(ui): move controlnet autoprocess to queue
* fix(ui): fix non-serializable DOMRect in redux state
* feat(ui): QueueTable performance tweaks
* feat(ui): update queue list
Queue items expand to show the full queue item. Just as JSON for now.
* wip threaded session_processor
* feat(nodes,ui): fully migrate queue to session_processor
* feat(nodes,ui): add processor events
* feat(ui): ui tweaks
* feat(nodes,ui): consolidate events, reduce network requests
* feat(ui): cleanup & abstract queue hooks
* feat(nodes): optimize batch permutation
Use a generator to do only as much work as is needed.
Previously, though we only ended up creating exactly as many queue items as was needed, there was still some intermediary work that calculated *all* permutations. When that number was very high, the system had a very hard time and used a lot of memory.
The logic has been refactored to use a generator. Additionally, the batch validators are optimized to return early and use less memory.
* feat(ui): add seed behaviour parameter
This dynamic prompts parameter allows the seed to be randomized per prompt or per iteration:
- Per iteration: Use the same seed for all prompts in a single dynamic prompt expansion
- Per prompt: Use a different seed for every single prompt
"Per iteration" is appropriate for exploring the latents space with a stable starting noise, while "Per prompt" provides more variation.
* fix(ui): remove extraneous random seed nodes from linear graphs
* fix(ui): fix controlnet autoprocess not working when queue is running
* feat(queue): add timestamps to queue status updates
Also show execution time in queue list
* feat(queue): change all execution-related events to use the `queue_id` as the room, also include `queue_item_id` in InvocationQueueItem
This allows for much simpler handling of queue items.
* feat(api): deprecate sessions router
* chore(backend): tidy logging in `dependencies.py`
* fix(backend): respect `use_memory_db`
* feat(backend): add `config.log_sql` (enables sql trace logging)
* feat: add invocation cache
Supersedes #4574
The invocation cache provides simple node memoization functionality. Nodes that use the cache are memoized and not re-executed if their inputs haven't changed. Instead, the stored output is returned.
## Results
This feature provides anywhere from a significant to a massive performance improvement.
The improvement is most marked on large batches of generations where you only change a couple things (e.g. different seed or prompt for each iteration) and low-VRAM systems, where skipping an extraneous model load is a big deal.
## Overview
A new `invocation_cache` service is added to handle the caching. There's not much to it.
All nodes now inherit a boolean `use_cache` field from `BaseInvocation`. This is a node field and not a class attribute, because specific instances of nodes may want to opt in or out of caching.
The recently-added `invoke_internal()` method on `BaseInvocation` is used as an entrypoint for the cache logic.
To create a cache key, the invocation is first serialized using pydantic's provided `json()` method, skipping the unique `id` field. Then python's very fast builtin `hash()` is used to create an integer key. All implementations of `InvocationCacheBase` must provide a class method `create_key()` which accepts an invocation and outputs a string or integer key.
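As a hedged sketch (names illustrative), the key scheme described above amounts to:

    def create_key(invocation) -> int:
        # Serialize with pydantic, skipping the unique `id` field, then hash the string
        return hash(invocation.json(exclude={"id"}))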
## In-Memory Implementation
An in-memory implementation is provided. In this implementation, the node outputs are stored in memory as python classes. The in-memory cache does not persist application restarts.
Max node cache size is added as `node_cache_size` under the `Generation` config category.
It defaults to 512 - this number is up for discussion, but given that these are relatively lightweight pydantic models, I think it's safe to up this even higher.
Note that the cache isn't storing the big stuff - tensors and images are stored on disk, and outputs include only references to them.
## Node Definition
The default for all nodes is to use the cache. The `@invocation` decorator now accepts an optional `use_cache: bool` argument to override the default of `True`.
Non-deterministic nodes, however, should set this to `False`. Currently, all random-stuff nodes, including `dynamic_prompt`, are set to `False`.
The field name `use_cache` is now effectively a reserved field name and possibly a breaking change if any community nodes use this as a field name. In hindsight, all our reserved field names should have been prefixed with underscores or something.
## One Gotcha
Leaf nodes probably want to opt out of the cache, because if they are not cached, their outputs are not saved again.
If you run the same graph multiple times, you only end up with a single image output, because the image storage side-effects are in the `invoke()` method, which is bypassed if we have a cache hit.
## Linear UI
The linear graphs _almost_ just work, but due to the gotcha, we need to be careful about the final image-outputting node. To resolve this, a `SaveImageInvocation` node is added and used in the linear graphs.
This node is similar to `ImagePrimitive`, except it saves a copy of its input image, and has `use_cache` set to `False` by default.
This is now the leaf node in all linear graphs, and is the only node in those graphs with `use_cache == False` _and_ the only node with `is_intermediate == False`.
## Workflow Editor
All nodes now have a footer with a new `Use Cache [ ]` checkbox. It defaults to the value set by the invocation in its python definition, but can be changed by the user.
The workflow/node validation logic has been updated to migrate old workflows to use the new default values for `use_cache`. Users may still want to review the settings that have been chosen. In the event of catastrophic failure when running this migration, the default value of `True` is applied, as this is correct for most nodes.
Users should consider saving their workflows after loading them in and having them updated.
## Future Enhancements - Callback
A future enhancement would be to provide a callback to the `use_cache` flag that would be run as the node is executed to determine, based on its own internal state, if the cache should be used or not.
This would be useful for `DynamicPromptInvocation`, where the deterministic behaviour is determined by the `combinatorial: bool` field.
## Future Enhancements - Persisted Cache
Similar to how the latents storage is backed by disk, the invocation cache could be persisted to the database or disk. We'd need to be very careful about deserializing outputs, but it's perhaps worth exploring in the future.
* fix(ui): fix queue list item width
* feat(nodes): do not send the whole node on every generator progress
* feat(ui): strip out old logic related to sessions
Things like `isProcessing` are no longer relevant with queue. Removed them all & updated everything to be appropriate for the queue. May be a few little quirks I've missed...
* feat(ui): fix up param collapse labels
* feat(ui): click queue count to go to queue tab
* tidy(queue): update comment, query format
* feat(ui): fix progress bar when canceling
* fix(ui): fix circular dependency
* feat(nodes): bail on node caching logic if `node_cache_size == 0`
* feat(nodes): handle KeyError on node cache pop
* feat(nodes): bypass cache codepath if caches is disabled
more better no do thing
* fix(ui): reset api cache on connect/disconnect
* feat(ui): prevent enqueue when no prompts generated
* feat(ui): add queue controls to workflow editor
* feat(ui): update floating buttons & other incidental UI tweaks
* fix(ui): fix missing/incorrect translation keys
* fix(tests): add config service to mock invocation services
invoking requires access to `node_cache_size` in order to run
* optionally remove pause/resume buttons from queue UI
* option to disable prepending
* chore(ui): remove unused file
* feat(queue): remove `order_id` entirely, `item_id` is now an autoinc pk
---------
Co-authored-by: Mary Hipp <maryhipp@Marys-MacBook-Air.local>
2023-09-20 05:09:24 +00:00
feat(backend): allow/deny nodes
Allow denying and explicitly allowing nodes. When a not-allowed node is used, a pydantic `ValidationError` will be raised.
- When collecting all invocations, check against the allowlist and denylist first. When pydantic constructs any unions related to nodes, the denied nodes will be omitted
- Add `allow_nodes` and `deny_nodes` to `InvokeAIAppConfig`. These are `Union[list[str], None]`, and may be populated with the `type` of invocations.
- When `allow_nodes` is `None`, allow all nodes, else if it is `list[str]`, only allow nodes in the list
- When `deny_nodes` is `None`, deny no nodes, else if it is `list[str]`, deny nodes in the list
- `deny_nodes` overrides `allow_nodes`
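For example, in `invokeai.yaml` (node types illustrative):

    allow_nodes:
      - compel
      - denoise_latents
    deny_nodes:
      - dynamic_prompt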
2023-09-06 01:54:37 +00:00
    # NODES
2024-03-11 11:45:24 +00:00
    allow_nodes: Optional[list[str]] = Field(default=None, description="List of nodes to allow. Omit to allow all.")
    deny_nodes: Optional[list[str]] = Field(default=None, description="List of nodes to deny. Omit to deny none.")
    node_cache_size: int = Field(default=512, description="How many cached nodes to keep in memory.")
2024-03-09 06:22:55 +00:00
    # MODEL INSTALL
2024-03-21 06:43:13 +00:00
    hashing_algorithm: HASHING_ALGORITHMS = Field(default="blake3_single", description="Model hashing algorithm for model installs. 'blake3_multi' is best for SSDs. 'blake3_single' is best for spinning disk HDDs. 'random' disables hashing, instead assigning a UUID to models. Useful when using a memory db to reduce model installation time, or if you don't care about storing stable hashes for models. Alternatively, any other hashlib algorithm is accepted, though these are not nearly as performant as blake3.")
2024-03-11 11:45:24 +00:00
    remote_api_tokens: Optional[list[URLRegexTokenPair]] = Field(default=None, description="List of regular expression and token pairs used when downloading models from URLs. The download URL is tested against the regex, and if it matches, the token is provided as a Bearer token.")
2024-03-27 04:24:18 +00:00
    scan_models_on_startup: bool = Field(default=False, description="Scan the models directory on startup, registering orphaned models. This is typically only used in conjunction with `use_memory_db` for testing purposes.")
2024-03-11 11:45:24 +00:00
2023-05-04 05:20:30 +00:00
    # fmt: on
2024-03-15 08:38:42 +00:00
    model_config = SettingsConfigDict(env_prefix="INVOKEAI_", env_ignore_empty=True)
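    # With this prefix, any field can be overridden via an environment variable,
    # e.g. (illustrative): INVOKEAI_HOST=0.0.0.0 INVOKEAI_PORT=9091 invokeai-web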
2023-08-07 18:04:53 +00:00
2024-03-15 08:38:42 +00:00
    def update_config(self, config: dict[str, Any] | InvokeAIAppConfig, clobber: bool = True) -> None:
2024-03-11 11:45:24 +00:00
        """Updates the config, overwriting existing values.
2023-11-26 22:00:27 +00:00
2024-03-11 11:45:24 +00:00
        Args:
            config: A dictionary of config settings, or instance of `InvokeAIAppConfig`. If an instance of \
                `InvokeAIAppConfig`, only the explicitly set fields will be merged into the singleton config.
2024-03-15 08:38:42 +00:00
            clobber: If `True`, overwrite existing values. If `False`, only update fields that are not already set.
2023-07-27 14:54:01 +00:00
        """
2024-03-11 11:45:24 +00:00
        if isinstance(config, dict):
            new_config = self.model_validate(config)
        else:
2024-03-11 11:45:24 +00:00
            new_config = config
2023-07-04 21:05:35 +00:00
2024-03-11 11:45:24 +00:00
        for field_name in new_config.model_fields_set:
2024-03-11 14:39:08 +00:00
            new_value = getattr(new_config, field_name)
            current_value = getattr(self, field_name)
2024-03-15 08:38:42 +00:00
            if field_name in self.model_fields_set and not clobber:
                continue
2024-03-11 14:39:08 +00:00
            if new_value != current_value:
                setattr(self, field_name, new_value)
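
    # Example (hedged): merge another config's explicitly-set fields into this one,
    # without overwriting fields that were already set here:
    #   config.update_config(other_config, clobber=False)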
2023-05-04 05:20:30 +00:00
2024-03-19 10:28:07 +00:00
    def write_file(self, dest_path: Path, as_example: bool = False) -> None:
2024-03-11 14:45:12 +00:00
        """Write the current configuration to file. This will overwrite the existing file.
2023-05-04 05:20:30 +00:00
2024-03-11 11:45:24 +00:00
        A `meta` stanza is added to the top of the file, containing metadata about the config file. This is not stored in the config object.

        Args:
2024-03-11 14:45:12 +00:00
            dest_path: Path to write the config to.
2024-03-11 11:45:24 +00:00
        """
2024-03-19 22:24:56 +00:00
        dest_path.parent.mkdir(parents=True, exist_ok=True)
2024-03-11 14:45:12 +00:00
        with open(dest_path, "w") as file:
2024-03-19 09:24:02 +00:00
            # Meta fields should be written in a separate stanza - skip legacy_models_yaml_path
2024-03-15 12:21:21 +00:00
            meta_dict = self.model_dump(mode="json", include={"schema_version"})
            # User settings
            config_dict = self.model_dump(
                mode="json",
2024-03-19 10:28:07 +00:00
                exclude_unset=False if as_example else True,
                exclude_defaults=False if as_example else True,
                exclude_none=True if as_example else False,
2024-03-15 12:21:21 +00:00
                exclude={"schema_version", "legacy_models_yaml_path"},
            )
2024-03-19 10:28:07 +00:00
            if as_example:
                file.write(
                    "# This is an example file with default and example settings. Use the values here as a baseline.\n\n"
                )
2024-03-11 22:16:04 +00:00
            file.write("# Internal metadata - do not edit:\n")
2024-03-11 11:45:24 +00:00
            file.write(yaml.dump(meta_dict, sort_keys=False))
            file.write("\n")
2024-03-19 06:16:09 +00:00
            file.write("# Put user settings here - see https://invoke-ai.github.io/InvokeAI/features/CONFIGURATION/:\n")
2024-03-11 22:16:04 +00:00
            if len(config_dict) > 0:
                file.write(yaml.dump(config_dict, sort_keys=False))
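
        # The written file then looks roughly like this (values illustrative):
        #
        #   # Internal metadata - do not edit:
        #   schema_version: 4.0.1
        #
        #   # Put user settings here - see https://invoke-ai.github.io/InvokeAI/features/CONFIGURATION/:
        #   host: 0.0.0.0
        #   port: 9091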
2024-03-11 11:45:24 +00:00
    def _resolve(self, partial_path: Path) -> Path:
        return (self.root_path / partial_path).resolve()
2023-07-04 21:05:35 +00:00
2023-05-04 05:20:30 +00:00
    @property
    def root_path(self) -> Path:
2024-03-11 11:45:24 +00:00
        """Path to the runtime root directory, resolved to an absolute path."""
        if self._root:
            root = Path(self._root).expanduser().absolute()
2023-05-04 05:20:30 +00:00
        else:
2023-08-01 02:36:11 +00:00
            root = self.find_root().expanduser().absolute()
2024-03-11 11:45:24 +00:00
        self._root = root  # insulate ourselves from relative paths that may change
2023-12-22 12:30:51 +00:00
        return root.resolve()
2023-05-04 05:20:30 +00:00
2023-05-30 17:49:43 +00:00
    @property
2024-03-19 05:24:13 +00:00
    def config_file_path(self) -> Path:
2024-03-11 11:45:24 +00:00
        """Path to invokeai.yaml, resolved to an absolute path."""
2024-03-19 05:24:13 +00:00
        resolved_path = self._resolve(self._config_file or INIT_FILE)
2023-11-26 23:35:27 +00:00
        assert resolved_path is not None
        return resolved_path
2023-05-30 17:49:43 +00:00
2024-03-11 11:45:24 +00:00
    @property
    def outputs_path(self) -> Optional[Path]:
        """Path to the outputs directory, resolved to an absolute path."""
        return self._resolve(self.outputs_dir)
2023-05-04 05:20:30 +00:00
    @property
2023-06-04 00:24:41 +00:00
    def db_path(self) -> Path:
2024-03-11 11:45:24 +00:00
        """Path to the invokeai.db file, resolved to an absolute path."""
2023-11-26 23:35:27 +00:00
        db_dir = self._resolve(self.db_dir)
        assert db_dir is not None
        return db_dir / DB_FILE
2023-06-04 00:24:41 +00:00
2023-05-04 05:20:30 +00:00
    @property
2023-12-04 22:08:33 +00:00
    def legacy_conf_path(self) -> Path:
2024-03-11 11:45:24 +00:00
        """Path to directory of legacy configuration files (e.g. v1-inference.yaml), resolved to an absolute path."""
2023-05-04 05:20:30 +00:00
        return self._resolve(self.legacy_conf_dir)
    @property
2023-12-04 22:08:33 +00:00
    def models_path(self) -> Path:
2024-03-11 11:45:24 +00:00
        """Path to the models directory, resolved to an absolute path."""
2023-06-09 03:11:53 +00:00
        return self._resolve(self.models_dir)
2023-05-30 04:38:37 +00:00
2024-02-04 03:55:09 +00:00
    @property
2024-03-11 11:45:24 +00:00
    def convert_cache_path(self) -> Path:
        """Path to the converted models cache directory, resolved to an absolute path."""
2024-02-04 03:55:09 +00:00
        return self._resolve(self.convert_cache_dir)
2023-10-19 06:51:55 +00:00
    @property
    def custom_nodes_path(self) -> Path:
2024-03-11 11:45:24 +00:00
        """Path to the custom nodes directory, resolved to an absolute path."""
2023-11-26 23:35:27 +00:00
        custom_nodes_path = self._resolve(self.custom_nodes_dir)
        assert custom_nodes_path is not None
        return custom_nodes_path
2023-10-19 06:51:55 +00:00
2023-05-04 05:20:30 +00:00
    @property
2024-03-11 11:45:24 +00:00
    def profiles_path(self) -> Path:
        """Path to the graph profiles directory, resolved to an absolute path."""
        return self._resolve(self.profiles_dir)
2023-05-04 05:20:30 +00:00
2024-03-11 11:45:24 +00:00
    @staticmethod
    def find_root() -> Path:
        """Choose the runtime root directory when not specified on command line or init file."""
        if os.environ.get("INVOKEAI_ROOT"):
            root = Path(os.environ["INVOKEAI_ROOT"])
2024-04-03 04:19:52 +00:00
        elif venv := os.environ.get("VIRTUAL_ENV", None):
            root = Path(venv).parent.resolve()
2024-03-11 11:45:24 +00:00
        else:
            root = Path("~/invokeai").expanduser().resolve()
        return root
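
# Example usage (hedged; per the class docstring, `get_config` is re-exported from
# `invokeai.app.services.config`):
#   from invokeai.app.services.config import get_config
#   config = get_config()
#   config.models_path  # a field like models_dir, resolved against the runtime root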
2023-05-16 05:50:01 +00:00
2023-07-26 10:53:35 +00:00
2024-03-21 00:55:49 +00:00
class DefaultInvokeAIAppConfig(InvokeAIAppConfig):
    """A version of `InvokeAIAppConfig` that does not automatically parse any settings from environment variables
    or any file.

    This is useful for writing out a default config file.

    Note that init settings are set if provided.
    """

    @classmethod
    def settings_customise_sources(
        cls,
        settings_cls: type[BaseSettings],
        init_settings: PydanticBaseSettingsSource,
        env_settings: PydanticBaseSettingsSource,
        dotenv_settings: PydanticBaseSettingsSource,
        file_secret_settings: PydanticBaseSettingsSource,
    ) -> tuple[PydanticBaseSettingsSource, ...]:
        return (init_settings,)
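
# Example (hedged): because only `init_settings` is returned above, constructing this class
# ignores INVOKEAI_-prefixed environment variables and dotenv files, making it safe for
# generating a pristine default/example config:
#   DefaultInvokeAIAppConfig().write_file(Path("invokeai.example.yaml"), as_example=True)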
2024-03-11 11:45:24 +00:00
def migrate_v3_config_dict(config_dict: dict[str, Any]) -> InvokeAIAppConfig:
2024-03-15 10:43:04 +00:00
    """Migrate a v3 config dictionary to a current config object.
2024-03-11 11:45:24 +00:00

    Args:
        config_dict: A dictionary of settings from a v3 config file.

    Returns:
        An instance of `InvokeAIAppConfig` with the migrated settings.
    """
    parsed_config_dict: dict[str, Any] = {}
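    # For illustration, a v3 file nests settings under category headings; loaded as a dict
    # it looks roughly like this (category names and values are illustrative):
    #   {"InvokeAI": {"Paths": {"outdir": "outputs", "conf_path": "configs/models.yaml"},
    #                 "Memory/Performance": {"max_cache_size": 7.5}}}
    # The loops below flatten the categories and rename legacy keys.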
    for _category_name, category_dict in config_dict["InvokeAI"].items():
        for k, v in category_dict.items():
2024-03-11 21:39:38 +00:00
            # `outdir` was renamed to `outputs_dir` in v4
2024-03-11 11:45:24 +00:00
            if k == "outdir":
                parsed_config_dict["outputs_dir"] = v
2024-03-11 21:39:38 +00:00
            # `max_cache_size` was renamed to `ram` some time in v3, but both names were used
            if k == "max_cache_size" and "ram" not in category_dict:
                parsed_config_dict["ram"] = v
            # `max_vram_cache_size` was renamed to `vram` some time in v3, but both names were used
            if k == "max_vram_cache_size" and "vram" not in category_dict:
                parsed_config_dict["vram"] = v
2024-04-15 13:12:49 +00:00
            # autocast was removed in v4.0.1
            if k == "precision" and v == "autocast":
                parsed_config_dict["precision"] = "auto"
2024-03-15 10:43:04 +00:00
            if k == "conf_path":
                parsed_config_dict["legacy_models_yaml_path"] = v
2024-03-19 08:26:13 +00:00
            if k == "legacy_conf_dir":
2024-04-02 06:15:05 +00:00
                # The old default for this was "configs/stable-diffusion" ("configs\stable-diffusion" on Windows).
                if v == "configs/stable-diffusion" or v == "configs\\stable-diffusion":
                    # If the incoming config has the default value, skip it
                    continue
2024-03-19 08:26:13 +00:00
                elif Path(v).name == "stable-diffusion":
2024-04-02 06:15:05 +00:00
                    # Else if the path ends in "stable-diffusion", we assume the parent is the new correct path.
2024-03-19 08:26:13 +00:00
                    parsed_config_dict["legacy_conf_dir"] = str(Path(v).parent)
2024-04-02 06:15:05 +00:00
                else:
                    # Else we do not attempt to migrate this setting
                    parsed_config_dict["legacy_conf_dir"] = v
2024-03-11 11:45:24 +00:00
            elif k in InvokeAIAppConfig.model_fields:
                # skip unknown fields
                parsed_config_dict[k] = v
2024-03-21 00:55:49 +00:00
    # When migrating the config file, we should not include currently-set environment variables.
    config = DefaultInvokeAIAppConfig.model_validate(parsed_config_dict)
2024-03-15 10:43:04 +00:00
    return config
2024-03-11 11:45:24 +00:00
2024-04-15 13:12:49 +00:00
def migrate_v4_0_0_config_dict(config_dict: dict[str, Any]) -> InvokeAIAppConfig:
    """Migrate a v4.0.0 config dictionary to a current config object.

    Args:
        config_dict: A dictionary of settings from a v4.0.0 config file.

    Returns:
        An instance of `InvokeAIAppConfig` with the migrated settings.
    """
    parsed_config_dict: dict[str, Any] = {}
    for k, v in config_dict.items():
        # autocast was removed from precision in v4.0.1
        if k == "precision" and v == "autocast":
            parsed_config_dict["precision"] = "auto"
        else:
            parsed_config_dict[k] = v
        if k == "schema_version":
            parsed_config_dict[k] = CONFIG_SCHEMA_VERSION
    config = DefaultInvokeAIAppConfig.model_validate(parsed_config_dict)
    return config
2024-03-11 11:45:24 +00:00
def load_and_migrate_config(config_path: Path) -> InvokeAIAppConfig:
    """Load and migrate a config file to the latest version.

    Args:
        config_path: Path to the config file.

    Returns:
        An instance of `InvokeAIAppConfig` with the loaded and migrated settings.
    """
    assert config_path.suffix == ".yaml"
2024-04-04 03:59:20 +00:00
with open ( config_path , " rt " , encoding = locale . getpreferredencoding ( ) ) as file :
2024-03-11 11:45:24 +00:00
loaded_config_dict = yaml . safe_load ( file )
assert isinstance ( loaded_config_dict , dict )
if " InvokeAI " in loaded_config_dict :
# This is a v3 config file, attempt to migrate it
2024-03-15 12:21:21 +00:00
shutil . copy ( config_path , config_path . with_suffix ( " .yaml.bak " ) )
2024-03-11 11:45:24 +00:00
try :
2024-03-21 00:55:49 +00:00
# loaded_config_dict could be the wrong shape, but we will catch all exceptions below
migrated_config = migrate_v3_config_dict ( loaded_config_dict ) # pyright: ignore [reportUnknownArgumentType]
2024-03-11 11:45:24 +00:00
except Exception as e :
2024-03-15 12:21:21 +00:00
            # Restore the backup so the original file is left untouched
            shutil.copy(config_path.with_suffix(".yaml.bak"), config_path)
2024-03-11 11:45:24 +00:00
raise RuntimeError ( f " Failed to load and migrate v3 config file { config_path } : { e } " ) from e
2024-03-21 00:55:49 +00:00
        migrated_config.write_file(config_path)
        return migrated_config
2024-04-15 13:12:49 +00:00
    if loaded_config_dict["schema_version"] == "4.0.0":
        # Note: the v4.0.0 migration returns a config object rather than a dict; it is re-validated below
        loaded_config_dict = migrate_v4_0_0_config_dict(loaded_config_dict)
        loaded_config_dict.write_file(config_path)
    # Attempt to load as a v4 config file
    try:
        # Meta is not included in the model fields, so we need to validate it separately
        config = InvokeAIAppConfig.model_validate(loaded_config_dict)
        assert (
            config.schema_version == CONFIG_SCHEMA_VERSION
        ), f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
        return config
    except Exception as e:
        raise RuntimeError(f"Failed to load config file {config_path}: {e}") from e
2024-03-11 11:45:24 +00:00
@lru_cache(maxsize=1)
def get_config() -> InvokeAIAppConfig:
2024-03-19 02:18:54 +00:00
""" Get the global singleton app config.
2024-03-15 05:33:52 +00:00
2024-03-19 02:18:54 +00:00
    When first called, this function:
    - Creates a config object. `pydantic-settings` handles merging of settings from environment variables, but not the init file.
    - Retrieves any provided CLI args from the InvokeAIArgs class. It does not _parse_ the CLI args; that is done in the main entrypoint.
    - Sets the root dir, if provided via CLI args.
    - Logs in to HF if there is no valid token already.
    - Copies all legacy configs to the legacy conf dir (needed for conversion from ckpt to diffusers).
    - Reads and merges in settings from the config file if it exists, else writes out a default config file.

    On subsequent calls, the object is returned from the cache.
2024-03-15 05:33:52 +00:00
"""
2024-03-21 00:55:49 +00:00
    # This object includes environment variables, as parsed by pydantic-settings
2024-03-15 05:33:52 +00:00
    config = InvokeAIAppConfig()

    args = InvokeAIArgs.args
2024-03-19 04:53:04 +00:00
    # This flag serves as a proxy for whether the config was retrieved in the context of the full application or not
    # (e.g. when the module is imported by a test runner rather than started via the app entrypoint). If it is False,
    # we should just return a default config and not set the root, log in to HF, etc.
    if not InvokeAIArgs.did_parse:
        return config
2024-03-19 05:24:13 +00:00
    # Set CLI args
2024-03-15 05:33:52 +00:00
    if root := getattr(args, "root", None):
2024-03-19 05:24:13 +00:00
        config._root = Path(root)
    if config_file := getattr(args, "config_file", None):
        config._config_file = Path(config_file)
2024-03-19 02:18:54 +00:00
2024-03-21 00:55:49 +00:00
    # Create the example config file, with some extra example values provided
    example_config = DefaultInvokeAIAppConfig()
2024-03-19 10:28:07 +00:00
    example_config.remote_api_tokens = [
        URLRegexTokenPair(url_regex="cool-models.com", token="my_secret_token"),
        URLRegexTokenPair(url_regex="nifty-models.com", token="some_other_token"),
    ]
    example_config.write_file(config.config_file_path.with_suffix(".example.yaml"), as_example=True)
2024-03-19 02:18:54 +00:00
    # Copy all legacy configs - We know `__path__[0]` is correct here
    configs_src = Path(model_configs.__path__[0])  # pyright: ignore [reportUnknownMemberType, reportUnknownArgumentType, reportAttributeAccessIssue]
    shutil.copytree(configs_src, config.legacy_conf_path, dirs_exist_ok=True)
2024-03-19 05:24:13 +00:00
    if config.config_file_path.exists():
2024-03-21 00:55:49 +00:00
        config_from_file = load_and_migrate_config(config.config_file_path)
2024-03-19 05:24:13 +00:00
        # Clobbering would overwrite settings that were set via environment variables, so we merge without clobbering
2024-03-21 00:55:49 +00:00
        config.update_config(config_from_file, clobber=False)
2024-03-19 02:18:54 +00:00
    else:
2024-03-21 00:55:49 +00:00
        # We should never write env vars to the config file
        default_config = DefaultInvokeAIAppConfig()
        default_config.write_file(config.config_file_path, as_example=False)
2024-03-15 05:33:52 +00:00
    return config
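# Usage sketch: `@lru_cache(maxsize=1)` makes `get_config` a process-wide singleton,
# so all callers share one config object. Tests that need a fresh instance can call
# the standard `lru_cache` method `get_config.cache_clear()` before calling again.
#
#     config = get_config()
#     assert config is get_config()  # same cached object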