Demote model cache logs from warning to debug based on the conversation here: https://discord.com/channels/1020123559063990373/1049495067846524939/1161647290189090816
parent 98fa234529
commit 40f9e49b5e
@@ -238,11 +238,8 @@ class ModelCache(object):
                 f"{get_pretty_snapshot_diff(snapshot_before, snapshot_after)}"
             )
 
-            # We only log a warning for over-reported (not under-reported) model sizes before load. There is a known
-            # issue where models report their fp32 size before load, and are then loaded as fp16. Once this issue is
-            # addressed, it would make sense to log a warning for both over-reported and under-reported model sizes.
-            if (self_reported_model_size_after_load - self_reported_model_size_before_load) > 10 * MB:
-                self.logger.warning(
+            if abs(self_reported_model_size_after_load - self_reported_model_size_before_load) > 10 * MB:
+                self.logger.debug(
                     f"Model '{key}' mis-reported its size before load. Self-reported size before/after load:"
                     f" {(self_reported_model_size_before_load/GIG):.2f}GB /"
                     f" {(self_reported_model_size_after_load/GIG):.2f}GB."
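The removed comment describes the known failure mode behind this check: a model can self-report its fp32 checkpoint size before load and then actually be loaded as fp16, roughly halving its footprint. A minimal sketch of that arithmetic, not InvokeAI code; the parameter count and the estimated_size_bytes helper are illustrative:

MB = 2**20
GIG = 2**30

def estimated_size_bytes(num_params: int, bytes_per_param: int) -> int:
    # Rough footprint: parameter count times bytes per element.
    return num_params * bytes_per_param

num_params = 860_000_000                         # illustrative, ballpark of an SD UNet
size_fp32 = estimated_size_bytes(num_params, 4)  # self-reported before load (fp32)
size_fp16 = estimated_size_bytes(num_params, 2)  # actual size after an fp16 load

# Same tolerance as the diff: only log when the discrepancy exceeds 10 MB.
if abs(size_fp32 - size_fp16) > 10 * MB:
    print(
        f"mis-reported size before/after load:"
        f" {size_fp32 / GIG:.2f}GB / {size_fp16 / GIG:.2f}GB"
    )

For a model of this size the two estimates differ by about 1.6 GB, far past the 10 MB threshold, which is why the message fires routinely today and is better suited to the debug level until the fp32/fp16 reporting issue is fixed.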
@@ -299,7 +296,7 @@ class ModelCache(object):
                 rel_tol=0.1,
                 abs_tol=10 * MB,
             ):
-                self.logger.warning(
+                self.logger.debug(
                     f"Moving model '{key}' from {source_device} to"
                     f" {target_device} caused an unexpected change in VRAM usage. The model's"
                     " estimated size may be incorrect. Estimated model size:"
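In this second hunk, the rel_tol and abs_tol context lines are keyword arguments to a tolerance comparison of the expected versus observed VRAM change; Python's math.isclose takes exactly these parameters, so the sketch below assumes that is the call being closed by the "):" line. The VRAM numbers are made up for illustration:

import math

MB = 2**20

expected_change = 2_000 * MB  # illustrative: VRAM delta predicted from the model's estimated size
actual_change = 2_300 * MB    # illustrative: VRAM delta actually measured after the move

# isclose(a, b, rel_tol=0.1, abs_tol=10 * MB) passes when the values differ by
# at most 10% of the larger magnitude, or by at most 10 MB in absolute terms.
if not math.isclose(actual_change, expected_change, rel_tol=0.1, abs_tol=10 * MB):
    print("unexpected change in VRAM usage; the model's estimated size may be incorrect")

Since size estimates are known to be unreliable, emitting this at debug level keeps the diagnostic available without alarming users on every model move.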