Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
add always_use_cpu arg to bypass MPS
This commit is contained in: parent e0495a7440, commit f48706efee
@@ -47,6 +47,7 @@ def main():
     # alert - setting globals here
     Globals.root = os.path.expanduser(args.root_dir or os.environ.get('INVOKEAI_ROOT') or os.path.abspath('.'))
     Globals.try_patchmatch = args.patchmatch
+    Globals.always_use_cpu = args.always_use_cpu
 
     print(f'>> InvokeAI runtime directory is "{Globals.root}"')
 
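For context, the hunk above copies freshly parsed CLI options onto the shared Globals namespace. A minimal, self-contained sketch of that pattern (not the actual InvokeAI main(); `Globals` and `apply_args_to_globals` are stand-ins for illustration):

    import os
    from types import SimpleNamespace

    Globals = SimpleNamespace()

    def apply_args_to_globals(args):
        # Mirror selected CLI options onto the process-wide namespace,
        # matching the assignments in the hunk above.
        Globals.root = os.path.expanduser(
            args.root_dir or os.environ.get('INVOKEAI_ROOT') or os.path.abspath('.'))
        Globals.try_patchmatch = args.patchmatch
        Globals.always_use_cpu = args.always_use_cpu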
@@ -455,6 +455,12 @@ class Args(object):
             action='store_true',
             help='Force free gpu memory before final decoding',
         )
+        model_group.add_argument(
+            "--always_use_cpu",
+            dest="always_use_cpu",
+            action="store_true",
+            help="Force use of CPU even if GPU is available"
+        )
         model_group.add_argument(
             '--precision',
             dest='precision',
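The new option is a standard argparse store_true flag: it defaults to False and flips to True only when --always_use_cpu is passed. A self-contained sketch of that behavior (the parser and argument group here are local stand-ins for the Args class internals):

    import argparse

    parser = argparse.ArgumentParser()
    model_group = parser.add_argument_group('Model')
    model_group.add_argument(
        "--always_use_cpu",
        dest="always_use_cpu",
        action="store_true",
        help="Force use of CPU even if GPU is available",
    )

    assert parser.parse_args([]).always_use_cpu is False            # flag absent
    assert parser.parse_args(["--always_use_cpu"]).always_use_cpu is True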
@@ -1,9 +1,12 @@
 import torch
 from torch import autocast
 from contextlib import nullcontext
+from ldm.invoke.globals import Globals
 
 def choose_torch_device() -> str:
     '''Convenience routine for guessing which GPU device to run model on'''
+    if Globals.always_use_cpu:
+        return "cpu"
     if torch.cuda.is_available():
         return 'cuda'
     if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
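With this change, choose_torch_device() consults the override before probing any hardware, so a CPU request wins even on machines with CUDA or MPS. A runnable sketch of the resulting selection order (Globals is a local stand-in for ldm.invoke.globals.Globals, and the mps/cpu returns at the end are a plausible completion of the function's tail, which the hunk truncates):

    import torch
    from types import SimpleNamespace

    Globals = SimpleNamespace(always_use_cpu=False)  # stand-in for the real Globals

    def choose_torch_device() -> str:
        '''Convenience routine for guessing which GPU device to run model on'''
        if Globals.always_use_cpu:
            return "cpu"          # override wins before any hardware probing
        if torch.cuda.is_available():
            return 'cuda'
        if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
            return 'mps'
        return 'cpu'

    Globals.always_use_cpu = True
    assert choose_torch_device() == "cpu"   # CUDA/MPS are bypassed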
|
@ -8,6 +8,7 @@ the attributes:
|
|||||||
- root - the root directory under which "models" and "outputs" can be found
|
- root - the root directory under which "models" and "outputs" can be found
|
||||||
- initfile - path to the initialization file
|
- initfile - path to the initialization file
|
||||||
- try_patchmatch - option to globally disable loading of 'patchmatch' module
|
- try_patchmatch - option to globally disable loading of 'patchmatch' module
|
||||||
|
- always_use_cpu - force use of CPU even if GPU is available
|
||||||
'''
|
'''
|
||||||
|
|
||||||
import os
|
import os
|
||||||
@@ -24,3 +25,6 @@ Globals.initfile = os.path.expanduser('~/.invokeai')
 # Awkward workaround to disable attempted loading of pypatchmatch
 # which is causing CI tests to error out.
 Globals.try_patchmatch = True
+
+# Use CPU even if GPU is available (main use case is for debugging MPS issues)
+Globals.always_use_cpu = False
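The module-level default keeps the flag off unless a caller opts in, so merely importing the globals module never changes device selection. A minimal sketch of the pattern (the hunk does not show how Globals is constructed; a types.SimpleNamespace is assumed here, though argparse.Namespace would behave the same):

    from types import SimpleNamespace

    # Process-wide settings with conservative defaults; main() overwrites
    # these from the parsed CLI arguments at startup.
    Globals = SimpleNamespace()
    Globals.try_patchmatch = True
    Globals.always_use_cpu = False   # opt-in only, e.g. for debugging MPS issues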