Add an app config parameter to control the ModelCache logging behavior.

This commit is contained in:
Ryan Dick 2023-11-02 12:07:48 -04:00 committed by Kent Keirsey
parent 3781e56e57
commit 4a683cc669
2 changed files with 6 additions and 4 deletions

View File

@ -45,6 +45,7 @@ InvokeAI:
ram: 13.5
vram: 0.25
lazy_offload: true
log_memory_usage: false
Device:
device: auto
precision: auto
@ -261,6 +262,8 @@ class InvokeAIAppConfig(InvokeAISettings):
ram : float = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", json_schema_extra=Categories.ModelCache, )
vram : float = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", json_schema_extra=Categories.ModelCache, )
lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", json_schema_extra=Categories.ModelCache, )
log_memory_usage : bool = Field(default=False, description="If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.", json_schema_extra=Categories.ModelCache)
# DEVICE
device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", json_schema_extra=Categories.Device)

View File

@ -351,6 +351,7 @@ class ModelManager(object):
precision=precision,
sequential_offload=sequential_offload,
logger=logger,
log_memory_usage=self.app_config.log_memory_usage,
)
self._read_models(config)
@ -933,8 +934,7 @@ class ModelManager(object):
"""
Returns the preamble for the config file.
"""
return textwrap.dedent(
"""
return textwrap.dedent("""
# This file describes the alternative machine learning models
# available to InvokeAI script.
#
@ -942,8 +942,7 @@ class ModelManager(object):
# model requires a model config file, a weights file,
# and the width and height of the images it
# was trained on.
"""
)
""")
def scan_models_directory(
self,