From 40f9e49b5e5a05cb00f633dac61fdc9a95cd0903 Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Wed, 11 Oct 2023 11:08:36 -0400
Subject: [PATCH] Demote model cache logs from warning to debug based on the
 conversation here:
 https://discord.com/channels/1020123559063990373/1049495067846524939/1161647290189090816

---
 invokeai/backend/model_management/model_cache.py | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py
index 9cf3852449..2a6079b2c4 100644
--- a/invokeai/backend/model_management/model_cache.py
+++ b/invokeai/backend/model_management/model_cache.py
@@ -238,11 +238,8 @@ class ModelCache(object):
                 f"{get_pretty_snapshot_diff(snapshot_before, snapshot_after)}"
             )
 
-            # We only log a warning for over-reported (not under-reported) model sizes before load. There is a known
-            # issue where models report their fp32 size before load, and are then loaded as fp16. Once this issue is
-            # addressed, it would make sense to log a warning for both over-reported and under-reported model sizes.
-            if (self_reported_model_size_after_load - self_reported_model_size_before_load) > 10 * MB:
-                self.logger.warning(
+            if abs(self_reported_model_size_after_load - self_reported_model_size_before_load) > 10 * MB:
+                self.logger.debug(
                     f"Model '{key}' mis-reported its size before load. Self-reported size before/after load:"
                     f" {(self_reported_model_size_before_load/GIG):.2f}GB /"
                     f" {(self_reported_model_size_after_load/GIG):.2f}GB."
@@ -299,7 +296,7 @@ class ModelCache(object):
                 rel_tol=0.1,
                 abs_tol=10 * MB,
             ):
-                self.logger.warning(
+                self.logger.debug(
                     f"Moving model '{key}' from {source_device} to"
                     f" {target_device} caused an unexpected change in VRAM usage. The model's"
                     " estimated size may be incorrect. Estimated model size:"
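
For reference, the two checks whose log level this patch demotes can be sketched roughly as follows. This is a minimal, self-contained sketch, not the actual ModelCache code: the MB/GIG constants, function names, and logger below are stand-ins assumed for illustration.

# Illustrative sketch only: mirrors the two size-mismatch checks touched by this patch,
# which now log at debug instead of warning. Names and constants are assumptions.
import logging
import math

MB = 2**20   # bytes per MiB (assumed to match the MB constant in model_cache.py)
GIG = 2**30  # bytes per GiB (assumed to match the GIG constant)

logger = logging.getLogger("model_cache_sketch")


def check_self_reported_size(key: str, size_before: int, size_after: int) -> None:
    # First hunk: flag a before/after self-reported size difference larger than
    # 10 MB, in either direction, at debug level.
    if abs(size_after - size_before) > 10 * MB:
        logger.debug(
            "Model '%s' mis-reported its size before load: %.2fGB / %.2fGB",
            key,
            size_before / GIG,
            size_after / GIG,
        )


def check_vram_change(key: str, estimated_size: int, observed_vram_change: int) -> None:
    # Second hunk: treat the size estimate as suspect when the observed VRAM change
    # differs from it by more than 10% (rel_tol) or 10 MB (abs_tol), logging at debug level.
    if not math.isclose(observed_vram_change, estimated_size, rel_tol=0.1, abs_tol=10 * MB):
        logger.debug("Moving model '%s' caused an unexpected change in VRAM usage.", key)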