Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00.
Commit f9faca7c91 (parent 594fd3ba6d): Add warning log if model mis-reports its required cache memory before load from disk.
This commit is contained in:
@@ -18,6 +18,7 @@ context. Use like this:

 import gc
 import hashlib
+import math
 import os
 import sys
 import time
||||
@@ -46,6 +47,8 @@ DEFAULT_MAX_VRAM_CACHE_SIZE = 2.75

 # actual size of a gig
 GIG = 1073741824
+# Size of a MB in bytes.
+MB = 2**20

@dataclass
|
||||
@@ -233,6 +236,15 @@ class ModelCache(object):
                     f" {get_pretty_snapshot_diff(snapshot_before, snapshot_after)}."
                 )

+                if not math.isclose(
+                    self_reported_model_size_before_load, self_reported_model_size_after_load, abs_tol=10 * MB
+                ):
+                    self.logger.warning(
+                        f"Model '{key}' mis-reported its size before load. Self-reported size before/after load:"
+                        f" {(self_reported_model_size_before_load/GIG):.2f}GB /"
+                        f" {(self_reported_model_size_after_load/GIG):.2f}GB."
+                    )
+
                 cache_entry = _CacheRecord(self, model, self_reported_model_size_after_load)
                 self._cached_models[key] = cache_entry
             else:
Loading…
Reference in New Issue
Block a user