mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
fix ValueError on model manager install
This commit is contained in:
parent
763a2e2632
commit
d04c880cce
@@ -123,7 +123,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
         assert current_thread is not None
         assigned = [x for x, tid in self._execution_devices.items() if current_thread == tid]
         if not assigned:
-            raise ValueError("No GPU has been reserved for the use of thread {current_thread}")
+            raise ValueError(f"No GPU has been reserved for the use of thread {current_thread}")
         return assigned[0]

     @contextmanager
|
@@ -59,7 +59,13 @@ class TorchDevice:
     def choose_torch_device(cls) -> torch.device:
         """Return the torch.device to use for accelerated inference."""
         if cls._model_cache:
-            return cls._model_cache.get_execution_device()
+            try:
+                return cls._model_cache.get_execution_device()
+            except ValueError as e:  # May happen if no gpu was reserved. Return a generic device.
+                if str(e).startswith("No GPU has been reserved"):
+                    pass
+                else:
+                    raise e
         app_config = get_config()
         if app_config.device != "auto":
             device = torch.device(app_config.device)
Loading…
Reference in New Issue
Block a user