From 4a683cc669992bb23ff43bea48799aa879e2d3cf Mon Sep 17 00:00:00 2001 From: Ryan Dick Date: Thu, 2 Nov 2023 12:07:48 -0400 Subject: [PATCH] Add a app config parameter to control the ModelCache logging behavior. --- invokeai/app/services/config/config_default.py | 3 +++ invokeai/backend/model_management/model_manager.py | 7 +++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/invokeai/app/services/config/config_default.py b/invokeai/app/services/config/config_default.py index a877c465d2..23436a9172 100644 --- a/invokeai/app/services/config/config_default.py +++ b/invokeai/app/services/config/config_default.py @@ -45,6 +45,7 @@ InvokeAI: ram: 13.5 vram: 0.25 lazy_offload: true + log_memory_usage: false Device: device: auto precision: auto @@ -261,6 +262,8 @@ class InvokeAIAppConfig(InvokeAISettings): ram : float = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", json_schema_extra=Categories.ModelCache, ) vram : float = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", json_schema_extra=Categories.ModelCache, ) lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", json_schema_extra=Categories.ModelCache, ) + log_memory_usage : bool = Field(default=False, description="If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). 
There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.", json_schema_extra=Categories.ModelCache) + # DEVICE device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", json_schema_extra=Categories.Device) diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 9390c8ce54..e63b559970 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -351,6 +351,7 @@ class ModelManager(object): precision=precision, sequential_offload=sequential_offload, logger=logger, + log_memory_usage=self.app_config.log_memory_usage, ) self._read_models(config) @@ -933,8 +934,7 @@ class ModelManager(object): """ Returns the preamble for the config file. """ - return textwrap.dedent( - """ # This file describes the alternative machine learning models # available to InvokeAI script. # @@ -942,8 +942,7 @@ class ModelManager(object): # model requires a model config file, a weights file, # and the width and height of the images it # was trained on. - """ - ) + """) def scan_models_directory( self,