add always_use_cpu arg to bypass MPS

This commit is contained in:
Damian Stewart 2022-12-04 15:15:39 +01:00
parent e0495a7440
commit f48706efee
5 changed files with 41 additions and 27 deletions

View File

@@ -47,6 +47,7 @@ def main():
# alert - setting globals here
Globals.root = os.path.expanduser(args.root_dir or os.environ.get('INVOKEAI_ROOT') or os.path.abspath('.'))
Globals.try_patchmatch = args.patchmatch
Globals.always_use_cpu = args.always_use_cpu
print(f'>> InvokeAI runtime directory is "{Globals.root}"')

View File

@@ -455,6 +455,12 @@ class Args(object):
action='store_true',
help='Force free gpu memory before final decoding',
)
model_group.add_argument(
"--always_use_cpu",
dest="always_use_cpu",
action="store_true",
help="Force use of CPU even if GPU is available"
)
model_group.add_argument(
'--precision',
dest='precision',

View File

@@ -1,9 +1,12 @@
import torch
from torch import autocast
from contextlib import nullcontext
from ldm.invoke.globals import Globals
def choose_torch_device() -> str:
'''Convenience routine for guessing which GPU device to run model on'''
if Globals.always_use_cpu:
return "cpu"
if torch.cuda.is_available():
return 'cuda'
if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():

View File

@@ -8,6 +8,7 @@ the attributes:
- root - the root directory under which "models" and "outputs" can be found
- initfile - path to the initialization file
- try_patchmatch - option to globally disable loading of 'patchmatch' module
- always_use_cpu - force use of CPU even if GPU is available
'''
import os
@@ -24,3 +25,6 @@ Globals.initfile = os.path.expanduser('~/.invokeai')
# Awkward workaround to disable attempted loading of pypatchmatch
# which is causing CI tests to error out.
Globals.try_patchmatch = True
# Use CPU even if GPU is available (main use case is for debugging MPS issues)
Globals.always_use_cpu = False