From 7fa5bae8fd7154a877a35483a4ece03065840b18 Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Fri, 29 Sep 2023 08:59:51 -0400
Subject: [PATCH] Add warning log if moving model from RAM to VRAM causes an
 unexpected change in VRAM usage.

---
 invokeai/backend/model_management/model_cache.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py
index 62e95ce021..8ea82e2caa 100644
--- a/invokeai/backend/model_management/model_cache.py
+++ b/invokeai/backend/model_management/model_cache.py
@@ -307,6 +307,20 @@ class ModelCache(object):
                         f" {get_pretty_snapshot_diff(snapshot_before, snapshot_after)}."
                     )
 
+                    if not math.isclose(
+                        abs((snapshot_before.vram or 0) - (snapshot_after.vram or 0)),
+                        self.cache_entry.size,
+                        rel_tol=0.1,
+                        abs_tol=10 * MB,
+                    ):
+                        self.cache.logger.warning(
+                            f"Moving '{self.key}' from {self.cache.storage_device} to"
+                            f" {self.cache.execution_device} caused an unexpected change in VRAM usage. The model's"
+                            " estimated size may be incorrect. Estimated model size:"
+                            f" {(self.cache_entry.size/GIG):.2f} GB."
+                            f" {get_pretty_snapshot_diff(snapshot_before, snapshot_after)}."
+                        )
+
                 self.cache.logger.debug(f"Locking {self.key} in {self.cache.execution_device}")
                 self.cache._print_cuda_stats()
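
For reference, a minimal standalone sketch of the tolerance check introduced
above. The MB and GIG constants are not defined in this hunk, so their values
here (2**20 and 2**30 bytes) are assumptions, as is the helper name
vram_change_is_expected:

    import math

    MB = 2**20   # assumed: 1 MiB, matching the abs_tol unit in the patch
    GIG = 2**30  # assumed: 1 GiB, used to pretty-print the estimated size

    def vram_change_is_expected(vram_before: int, vram_after: int, estimated_size: int) -> bool:
        # True if the observed VRAM delta matches the model's estimated size
        # within 10% relative tolerance or a 10 MiB absolute tolerance.
        observed_delta = abs(vram_before - vram_after)
        return math.isclose(observed_delta, estimated_size, rel_tol=0.1, abs_tol=10 * MB)

    # A model estimated at 2.0 GiB that actually consumes 2.1 GiB of VRAM falls
    # within the 10% relative tolerance, so no warning would be logged.
    assert vram_change_is_expected(0, int(2.1 * GIG), 2 * GIG)

    # A model estimated at 2.0 GiB that consumes 3.0 GiB exceeds both tolerances,
    # so the cache would log the "unexpected change in VRAM usage" warning.
    assert not vram_change_is_expected(0, 3 * GIG, 2 * GIG)

The abs_tol floor matters for small models, where a 10% relative tolerance
would be only a few megabytes and ordinary allocator granularity could
trigger spurious warnings.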