2022-08-31 04:33:23 +00:00
|
|
|
import torch
|
2022-09-06 00:40:10 +00:00
|
|
|
from torch import autocast
|
|
|
|
from contextlib import contextmanager, nullcontext
|
2022-08-31 04:33:23 +00:00
|
|
|
|
|
|
|
def choose_torch_device() -> str:
    '''Pick the best available torch device name.

    Preference order: CUDA first, then Apple MPS, falling back to CPU.
    Returns one of 'cuda', 'mps', or 'cpu'.
    '''
    if torch.cuda.is_available():
        return 'cuda'
    # torch.backends.mps only exists on builds with MPS support, so probe
    # for the attribute before asking whether the backend is usable.
    mps_backend = getattr(torch.backends, 'mps', None)
    if mps_backend is not None and mps_backend.is_available():
        return 'mps'
    return 'cpu'
|
|
|
|
|
2022-09-06 00:40:10 +00:00
|
|
|
def choose_autocast_device(device):
    '''Map a torch device to an (device_type, context factory) pair for autocast.

    Autocast is enabled only for CUDA devices; GTX 16xx cards are known to
    misbehave under autocast, so they get nullcontext instead. All non-CUDA
    devices (including MPS) fall back to ('cpu', nullcontext).
    '''
    device_type = device.type  # this returns 'mps' on M1
    if device_type != 'cuda':
        return 'cpu', nullcontext
    gpu_name = torch.cuda.get_device_name()
    # GTX 16xx series produces bad output with autocast enabled.
    problem_cards = ('GeForce GTX 1660', 'GeForce GTX 1650')
    if any(card in gpu_name for card in problem_cards):
        return device_type, nullcontext
    return device_type, autocast
|