diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
index 551412d66a..910087c4bb 100644
--- a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
+++ b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
@@ -123,7 +123,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
         assert current_thread is not None
         assigned = [x for x, tid in self._execution_devices.items() if current_thread == tid]
         if not assigned:
-            raise ValueError("No GPU has been reserved for the use of thread {current_thread}")
+            raise ValueError(f"No GPU has been reserved for the use of thread {current_thread}")
         return assigned[0]
 
     @contextmanager
diff --git a/invokeai/backend/util/devices.py b/invokeai/backend/util/devices.py
index 745c128099..c7db33a667 100644
--- a/invokeai/backend/util/devices.py
+++ b/invokeai/backend/util/devices.py
@@ -59,7 +59,13 @@ class TorchDevice:
     def choose_torch_device(cls) -> torch.device:
         """Return the torch.device to use for accelerated inference."""
        if cls._model_cache:
-            return cls._model_cache.get_execution_device()
+            try:
+                return cls._model_cache.get_execution_device()
+            except ValueError as e:  # May happen if no gpu was reserved. Return a generic device.
+                if str(e).startswith("No GPU has been reserved"):
+                    pass
+                else:
+                    raise e
         app_config = get_config()
         if app_config.device != "auto":
             device = torch.device(app_config.device)
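
For context, the second hunk makes `choose_torch_device` tolerate the case where the calling thread has no reserved GPU: instead of propagating the `ValueError` raised by `get_execution_device`, it falls through to the config-driven device selection below. A minimal standalone sketch of that fallback pattern is shown here; the cache and function here are simplified stand-ins for illustration, not the real InvokeAI classes.

```python
from typing import Optional

import torch


class _FakeCache:
    """Stand-in for ModelCache: raises when the calling thread has no reserved GPU."""

    def get_execution_device(self) -> torch.device:
        raise ValueError("No GPU has been reserved for the use of thread MainThread")


def choose_torch_device(cache: Optional[_FakeCache], configured: str = "auto") -> torch.device:
    # Mirrors the patched control flow: ask the cache first, swallow only the
    # "no reservation" error, then fall back to the configured / auto-detected device.
    if cache is not None:
        try:
            return cache.get_execution_device()
        except ValueError as e:
            if not str(e).startswith("No GPU has been reserved"):
                raise
    if configured != "auto":
        return torch.device(configured)
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")


print(choose_torch_device(_FakeCache()))  # falls back, e.g. "cpu" on a CPU-only machine
```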