From 6e7a3f05467c45a5418405ac4ff01412cd8b6c2d Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Thu, 2 Nov 2023 13:31:10 -0400
Subject: [PATCH] (minor) Fix static checks and typo.

---
 invokeai/app/services/config/config_default.py     | 3 +--
 invokeai/backend/model_management/model_manager.py | 6 ++++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/invokeai/app/services/config/config_default.py b/invokeai/app/services/config/config_default.py
index 23436a9172..f0e9dbcda4 100644
--- a/invokeai/app/services/config/config_default.py
+++ b/invokeai/app/services/config/config_default.py
@@ -262,8 +262,7 @@ class InvokeAIAppConfig(InvokeAISettings):
     ram : float = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", json_schema_extra=Categories.ModelCache, )
     vram : float = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", json_schema_extra=Categories.ModelCache, )
     lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", json_schema_extra=Categories.ModelCache, )
-    log_memory_usage : bool = Field(default=False, description="If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature unless you are actively inspecting the model cache's behaviour.", json_schema_extra=Categories.ModelCache)
-
+    log_memory_usage : bool = Field(default=False, description="If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.", json_schema_extra=Categories.ModelCache)

     # DEVICE
     device : Literal["auto", "cpu", "cuda", "cuda:1", "mps"] = Field(default="auto", description="Generation device", json_schema_extra=Categories.Device)
diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py
index e63b559970..da4239fa07 100644
--- a/invokeai/backend/model_management/model_manager.py
+++ b/invokeai/backend/model_management/model_manager.py
@@ -934,7 +934,8 @@ class ModelManager(object):
         """
         Returns the preamble for the config file.
         """
-        return textwrap.dedent("""
+        return textwrap.dedent(
+            """
             # This file describes the alternative machine learning models
             # available to InvokeAI script.
             #
@@ -942,7 +943,8 @@ class ModelManager(object):
             # model requires a model config file, a weights file,
             # and the width and height of the images it
             # was trained on.
-            """)
+            """
+        )

     def scan_models_directory(
         self,