From 8d7dba937d85f38acfc17a6d942a66a6a7c69d52 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Sun, 9 Jul 2023 14:37:45 -0400
Subject: [PATCH] fix undefined variable

---
 invokeai/backend/model_management/model_cache.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py
index ad95a02e52..ecda15beac 100644
--- a/invokeai/backend/model_management/model_cache.py
+++ b/invokeai/backend/model_management/model_cache.py
@@ -348,8 +348,8 @@ class ModelCache(object):
     def _offload_unlocked_models(self, size_needed: int=0):
         for model_key, cache_entry in sorted(self._cached_models.items(), key=lambda x:x[1].size):
             free_mem, used_mem = torch.cuda.mem_get_info()
-            self.logger.debug(f'Require {(size_needed/GIG):.2f}GB VRAM. Have {((free_mem-reserve)/GIG):.2f}GB available.')
-            if free_mem-reserve > size_needed:
+            self.logger.debug(f'Require {(size_needed/GIG):.2f}GB VRAM. Have {(free_mem/GIG):.2f}GB available.')
+            if free_mem > size_needed:
                 return
             if not cache_entry.locked and cache_entry.loaded:
                 self.logger.debug(f'Offloading {model_key} from {self.execution_device} into {self.storage_device}')
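
Note (not part of the patch): to make the fix concrete, below is a minimal, self-contained sketch of _offload_unlocked_models as it reads after this change. Everything outside the hunk is an assumption for illustration: the CacheEntry stub, the constructor, the logging setup, and the value of the GIG constant are stand-ins, not InvokeAI's actual definitions.

import logging
from dataclasses import dataclass

import torch

GIG = 2 ** 30  # assumption: GIG is bytes per gigabyte, per its use in the diff


@dataclass
class CacheEntry:
    """Illustrative stand-in for the cache entry the diff's attributes imply."""
    size: int      # model footprint in bytes (used as the sort key)
    locked: bool   # pinned on the execution device; never offloaded
    loaded: bool   # currently resident on the execution device


class ModelCache(object):
    def __init__(self):
        # Hypothetical minimal state; the real class has more configuration.
        self._cached_models = {}  # model_key -> CacheEntry
        self.logger = logging.getLogger('ModelCache')
        self.execution_device = torch.device('cuda')
        self.storage_device = torch.device('cpu')

    def _offload_unlocked_models(self, size_needed: int = 0):
        # Visit smallest entries first so as little as possible is evicted.
        for model_key, cache_entry in sorted(self._cached_models.items(), key=lambda x: x[1].size):
            # torch.cuda.mem_get_info() returns (free, total) in bytes for
            # the current CUDA device, so it requires a CUDA-capable machine.
            free_mem, _total_mem = torch.cuda.mem_get_info()
            self.logger.debug(f'Require {(size_needed/GIG):.2f}GB VRAM. Have {(free_mem/GIG):.2f}GB available.')
            # Post-patch check: free VRAM is compared directly against the
            # request, with no (previously undefined) `reserve` subtracted.
            if free_mem > size_needed:
                return
            if not cache_entry.locked and cache_entry.loaded:
                self.logger.debug(f'Offloading {model_key} from {self.execution_device} into {self.storage_device}')
                # Actual offload (moving the model to storage_device) elided.


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    if torch.cuda.is_available():  # mem_get_info needs a CUDA device
        cache = ModelCache()
        cache._cached_models['example-model'] = CacheEntry(size=2 * GIG, locked=False, loaded=True)
        cache._offload_unlocked_models(size_needed=4 * GIG)

The behavioral point of the patch is small but important: the old code referenced a `reserve` name that was never defined in this scope, so reaching either removed line raised a NameError at runtime. After the fix, free VRAM is compared directly against size_needed.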