made MPS calls conditional on MPS actually being the chosen device and the backend being available

Ryan
2023-09-11 00:44:43 -04:00
committed by Kent Keirsey
parent fab055995e
commit b7296000e4
2 changed files with 23 additions and 9 deletions
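
The hunks below gate every `torch.mps` call on `choose_torch_device()` returning the MPS device. For context, here is a minimal, self-contained sketch of the same pattern; the body of `choose_torch_device` is an assumption (the real one lives in `..util.devices` and is not part of this hunk), and `clear_device_cache` is a hypothetical helper, not a function from this commit:

import gc

import torch


def choose_torch_device() -> torch.device:
    # Hypothetical stand-in for invokeai's ..util.devices.choose_torch_device;
    # assumed to return "mps" only when the MPS backend is actually available.
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")


# Import torch.mps only when it will actually be used, so installs without
# the MPS backend never touch it.
if choose_torch_device() == torch.device("mps"):
    from torch import mps


def clear_device_cache() -> None:
    # Hypothetical helper mirroring the guarded cache clearing in the hunks below.
    gc.collect()
    torch.cuda.empty_cache()  # safe no-op when CUDA was never initialized
    if choose_torch_device() == torch.device("mps"):
        mps.empty_cache()
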


@@ -30,8 +30,12 @@ from torch import mps
 import invokeai.backend.util.logging as logger
 from ..util.devices import choose_torch_device
 from .models import BaseModelType, ModelBase, ModelType, SubModelType
+if choose_torch_device() == torch.device("mps"):
+    from torch import mps
 # Maximum size of the cache, in gigs
 # Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously
 DEFAULT_MAX_CACHE_SIZE = 6.0
@@ -407,7 +411,8 @@ class ModelCache(object):
 gc.collect()
 torch.cuda.empty_cache()
-mps.empty_cache()
+if choose_torch_device() == torch.device("mps"):
+    mps.empty_cache()
 self.logger.debug(f"After unloading: cached_models={len(self._cached_models)}")
@@ -428,7 +433,8 @@ class ModelCache(object):
 gc.collect()
 torch.cuda.empty_cache()
-mps.empty_cache()
+if choose_torch_device() == torch.device("mps"):
+    mps.empty_cache()
 def _local_model_hash(self, model_path: Union[str, Path]) -> str:
     sha = hashlib.sha256()
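
As a quick check of the "backend available" part of the commit message, a current PyTorch build exposes the availability probes below; `choose_torch_device()` presumably keys off the same signals, though the file defining it is not shown in these hunks:

import torch

print("MPS built:", torch.backends.mps.is_built())
print("MPS available:", torch.backends.mps.is_available())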