From d04c880cce7cc7bada48dcf72129ce25a8fa7582 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Tue, 16 Apr 2024 17:57:40 -0400
Subject: [PATCH] fix ValueError on model manager install

---
 .../model_manager/load/model_cache/model_cache_default.py | 2 +-
 invokeai/backend/util/devices.py                           | 8 +++++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
index 551412d66a..910087c4bb 100644
--- a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
+++ b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
@@ -123,7 +123,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
         assert current_thread is not None
         assigned = [x for x, tid in self._execution_devices.items() if current_thread == tid]
         if not assigned:
-            raise ValueError("No GPU has been reserved for the use of thread {current_thread}")
+            raise ValueError(f"No GPU has been reserved for the use of thread {current_thread}")
         return assigned[0]
 
     @contextmanager
diff --git a/invokeai/backend/util/devices.py b/invokeai/backend/util/devices.py
index 745c128099..c7db33a667 100644
--- a/invokeai/backend/util/devices.py
+++ b/invokeai/backend/util/devices.py
@@ -59,7 +59,13 @@ class TorchDevice:
     def choose_torch_device(cls) -> torch.device:
         """Return the torch.device to use for accelerated inference."""
         if cls._model_cache:
-            return cls._model_cache.get_execution_device()
+            try:
+                return cls._model_cache.get_execution_device()
+            except ValueError as e:  # May happen if no gpu was reserved. Return a generic device.
+                if str(e).startswith("No GPU has been reserved"):
+                    pass
+                else:
+                    raise e
         app_config = get_config()
         if app_config.device != "auto":
             device = torch.device(app_config.device)
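
For context, below is a minimal, self-contained sketch of the fallback pattern this patch adds to TorchDevice.choose_torch_device; it is illustrative only, not part of the commit. FakeCache is a hypothetical stand-in for the real ModelCache, and the generic device selection at the end only approximates the code that follows the hunk in devices.py.

# Sketch of the patch's fallback pattern: swallow only the specific
# "no GPU reserved" ValueError and fall through to generic device selection.
# FakeCache is hypothetical; it stands in for ModelCache on a thread that
# has no reserved execution device.
from typing import Optional

import torch


class FakeCache:
    """Hypothetical stand-in for ModelCache with no GPU reserved for this thread."""

    def get_execution_device(self) -> torch.device:
        raise ValueError("No GPU has been reserved for the use of thread MainThread")


def choose_torch_device(cache: Optional[FakeCache]) -> torch.device:
    if cache:
        try:
            return cache.get_execution_device()
        except ValueError as e:
            # Match only the expected message; re-raise anything unexpected.
            if not str(e).startswith("No GPU has been reserved"):
                raise
    # Generic fallback, approximating the rest of choose_torch_device.
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")


if __name__ == "__main__":
    # Before the patch this call propagated the ValueError; now it falls
    # back to a generic device (e.g. "cpu" on a machine with no accelerator).
    print(choose_torch_device(FakeCache()))

Note that matching the exception by message prefix is brittle compared to a dedicated exception subclass, but it keeps the patch small; the prefix check is what makes the first hunk's f-string fix matter, since the unformatted message would still match while losing the thread name from the log.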