Add debugging messages to aid in memory-leak tracking

This commit is contained in:
Lincoln Stein 2023-07-02 13:34:53 -04:00
parent 2b67509061
commit 6935858ef3

View File

@ -352,7 +352,9 @@ class ModelCache(object):
for model_key, cache_entry in self._cached_models.items():
if not cache_entry.locked and cache_entry.loaded:
self.logger.debug(f'Offloading {model_key} from {self.execution_device} into {self.storage_device}')
cache_entry.model.to(self.storage_device)
with VRAMUsage() as mem:
cache_entry.model.to(self.storage_device)
self.logger.debug(f'GPU VRAM freed: {(mem.vram_used/GIG):.2f} GB')
def _local_model_hash(self, model_path: Union[str, Path]) -> str:
sha = hashlib.sha256()