fix ValueError on model manager install

Lincoln Stein 2024-04-16 17:57:40 -04:00
parent 763a2e2632
commit d04c880cce
2 changed files with 8 additions and 2 deletions


@@ -123,7 +123,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
         assert current_thread is not None
         assigned = [x for x, tid in self._execution_devices.items() if current_thread == tid]
         if not assigned:
-            raise ValueError("No GPU has been reserved for the use of thread {current_thread}")
+            raise ValueError(f"No GPU has been reserved for the use of thread {current_thread}")
         return assigned[0]

     @contextmanager
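
For reference, a minimal standalone snippet (not part of the commit) showing why the missing f prefix mattered: without it, Python keeps the braces as literal text, so the error message never reports which thread lacked a GPU reservation.

import threading

current_thread = threading.get_ident()

# Without the f prefix the placeholder is left as literal text:
print("No GPU has been reserved for the use of thread {current_thread}")
# -> No GPU has been reserved for the use of thread {current_thread}

# With the f prefix the thread id is interpolated into the message:
print(f"No GPU has been reserved for the use of thread {current_thread}")
# -> No GPU has been reserved for the use of thread 140704374881216 (for example)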


@@ -59,7 +59,13 @@ class TorchDevice:
     def choose_torch_device(cls) -> torch.device:
         """Return the torch.device to use for accelerated inference."""
         if cls._model_cache:
-            return cls._model_cache.get_execution_device()
+            try:
+                return cls._model_cache.get_execution_device()
+            except ValueError as e:  # May happen if no gpu was reserved. Return a generic device.
+                if str(e).startswith("No GPU has been reserved"):
+                    pass
+                else:
+                    raise e
         app_config = get_config()
         if app_config.device != "auto":
             device = torch.device(app_config.device)