mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
Add warning log if moving model from RAM to VRAM causes an unexpected change in VRAM usage.
This commit is contained in:
parent
f9faca7c91
commit
7fa5bae8fd
@@ -307,6 +307,20 @@ class ModelCache(object):
|
||||
f" {get_pretty_snapshot_diff(snapshot_before, snapshot_after)}."
|
||||
)
|
||||
|
||||
if not math.isclose(
|
||||
abs((snapshot_before.vram or 0) - (snapshot_after.vram or 0)),
|
||||
self.cache_entry.size,
|
||||
rel_tol=0.1,
|
||||
abs_tol=10 * MB,
|
||||
):
|
||||
self.cache.logger.warning(
|
||||
f"Moving '{self.key}' from {self.cache.storage_device} to"
|
||||
f" {self.cache.execution_device} caused an unexpected change in VRAM usage. The model's"
|
||||
" estimated size may be incorrect. Estimated model size:"
|
||||
f" {(self.cache_entry.size/GIG):.2f} GB."
|
||||
f" {get_pretty_snapshot_diff(snapshot_before, snapshot_after)}."
|
||||
)
|
||||
|
||||
self.cache.logger.debug(f"Locking {self.key} in {self.cache.execution_device}")
|
||||
self.cache._print_cuda_stats()
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user