From 21a60af8812af33d219bd0a0d6b4a916da1967f5 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Tue, 28 May 2024 23:01:21 -0400
Subject: [PATCH] when unlocking models, offload_unlocked_models should prune
 to vram limit only (#6450)

Co-authored-by: Lincoln Stein
---
 invokeai/backend/model_manager/load/model_cache/model_locker.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/invokeai/backend/model_manager/load/model_cache/model_locker.py b/invokeai/backend/model_manager/load/model_cache/model_locker.py
index a275987773..269ac60479 100644
--- a/invokeai/backend/model_manager/load/model_cache/model_locker.py
+++ b/invokeai/backend/model_manager/load/model_cache/model_locker.py
@@ -60,5 +60,5 @@ class ModelLocker(ModelLockerBase):
         self._cache_entry.unlock()

         if not self._cache.lazy_offloading:
-            self._cache.offload_unlocked_models(self._cache_entry.size)
+            self._cache.offload_unlocked_models(0)
             self._cache.print_cuda_stats()