Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
made MPS calls conditional on MPS actually being the chosen device with backend available
@@ -30,8 +30,12 @@ from torch import mps
 import invokeai.backend.util.logging as logger
 
+from ..util.devices import choose_torch_device
 from .models import BaseModelType, ModelBase, ModelType, SubModelType
 
+if choose_torch_device() == torch.device("mps"):
+    from torch import mps
+
 # Maximum size of the cache, in gigs
 # Default is roughly enough to hold three fp16 diffusers models in RAM simultaneously
 DEFAULT_MAX_CACHE_SIZE = 6.0
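
The guard above keeps torch.mps from being imported unless MPS is actually the selected device. A minimal, self-contained sketch of the same pattern, where pick_device is a hypothetical stand-in for InvokeAI's choose_torch_device rather than the real helper:

    import torch

    def pick_device() -> torch.device:
        # Hypothetical stand-in for choose_torch_device(): prefer CUDA, then MPS
        # (only if the backend is actually available), otherwise fall back to CPU.
        if torch.cuda.is_available():
            return torch.device("cuda")
        if torch.backends.mps.is_available():
            return torch.device("mps")
        return torch.device("cpu")

    # Import torch.mps only when it will actually be used, so CUDA- or CPU-only
    # installs never touch the MPS backend at import time.
    if pick_device() == torch.device("mps"):
        from torch import mps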
@@ -407,7 +411,8 @@ class ModelCache(object):
         gc.collect()
         torch.cuda.empty_cache()
-        mps.empty_cache()
+        if choose_torch_device() == torch.device("mps"):
+            mps.empty_cache()
 
         self.logger.debug(f"After unloading: cached_models={len(self._cached_models)}")
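
This hunk and the one below wrap the same call: the MPS cache is flushed only when MPS is the chosen device. A rough sketch of the resulting cleanup pattern, reusing the hypothetical pick_device() from the sketch above (an illustration, not the actual ModelCache method):

    import gc

    import torch

    def clear_accelerator_caches() -> None:
        # Release Python-level references first, then ask each backend to
        # return cached allocator memory.
        gc.collect()
        # torch.cuda.empty_cache() is a no-op when CUDA was never initialized,
        # so it can stay unconditional, as in the diff.
        torch.cuda.empty_cache()
        # The MPS allocator is only touched when MPS is the selected device.
        if pick_device() == torch.device("mps"):
            from torch import mps
            mps.empty_cache()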
@@ -428,7 +433,8 @@ class ModelCache(object):
         gc.collect()
         torch.cuda.empty_cache()
-        mps.empty_cache()
+        if choose_torch_device() == torch.device("mps"):
+            mps.empty_cache()
 
     def _local_model_hash(self, model_path: Union[str, Path]) -> str:
         sha = hashlib.sha256()
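
The body of _local_model_hash is truncated by this hunk; only its signature and the sha = hashlib.sha256() line appear as trailing context. A plausible sketch of what such a helper does, where the directory handling, file ordering, and chunk size are assumptions rather than InvokeAI's actual implementation:

    import hashlib
    from pathlib import Path
    from typing import Union

    def local_model_hash(model_path: Union[str, Path]) -> str:
        # Fold a single checkpoint file, or every file inside a diffusers-style
        # model directory, into one SHA-256 digest.
        sha = hashlib.sha256()
        path = Path(model_path)
        files = [path] if path.is_file() else sorted(p for p in path.rglob("*") if p.is_file())
        for file in files:
            with open(file, "rb") as fh:
                for chunk in iter(lambda: fh.read(2**20), b""):  # 1 MiB chunks (assumed)
                    sha.update(chunk)
        return sha.hexdigest()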