From 46d23cd868590bfd951f17ca04d2ee8fc4cf209c Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Wed, 10 Apr 2024 23:44:14 -0400 Subject: [PATCH] catch RuntimeError during model `to()` call rather than OutOfMemoryError --- .../model_manager/load/model_cache/model_cache_default.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py index e48be7c008..14e7482050 100644 --- a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py +++ b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py @@ -273,7 +273,7 @@ class ModelCache(ModelCacheBase[AnyModel]): snapshot_before = self._capture_memory_snapshot() try: cache_entry.model.to(target_device) - except torch.cuda.OutOfMemoryError as e: # blow away cache entry + except RuntimeError as e: # blow away cache entry self._delete_cache_entry(cache_entry) raise e