mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
0bc6779361
When running on CPU only (Intel), a call to torch.layer_norm would error with "RuntimeError: expected scalar type BFloat16 but found Float". Fix buggy device handling in model.py. Tested with scripts/dream.py --full_precision on CPU only on an Intel laptop. Works, but slow at ~10s/it.
22 lines
714 B
Python
import torch
from torch import autocast
from contextlib import contextmanager, nullcontext


def choose_torch_device() -> str:
    '''Convenience routine for guessing which GPU device to run model on'''
    if torch.cuda.is_available():
        return 'cuda'
    if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
        return 'mps'
    return 'cpu'


def choose_autocast_device(device):
    '''Returns an autocast compatible device from a torch device'''
    device_type = device.type  # this returns 'mps' on M1
    if device_type == 'cuda':
        return device_type, autocast
    elif device_type == 'cpu':
        return device_type, nullcontext
    else:
        return 'cpu', nullcontext
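
As a usage sketch (illustrative only; the calling code below is an assumption, not taken from the repository), a caller picks the device with choose_torch_device() and then enters the context manager returned by choose_autocast_device(). On CUDA that context is torch.autocast for mixed precision; on CPU (and MPS, which falls back to the 'cpu' device type here) it is a nullcontext, so tensors stay in float32 and torch.layer_norm no longer hits the BFloat16/Float mismatch described in the commit message.

# Usage sketch (assumption: the two helpers above are in scope, e.g. imported
# from this module; the tensor work below is purely illustrative).
import torch

device = torch.device(choose_torch_device())         # 'cuda', 'mps', or 'cpu'
device_type, scope = choose_autocast_device(device)  # e.g. ('cpu', nullcontext)

with scope(device_type):
    # On CUDA: torch.autocast('cuda') enables mixed precision.
    # On CPU: contextlib.nullcontext('cpu'), i.e. plain full-precision
    # execution, so layer_norm sees matching float32 dtypes.
    x = torch.randn(1, 8, device=device)
    y = torch.nn.functional.layer_norm(x, (8,))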