diff --git a/invokeai/backend/model_management/model_cache.py b/invokeai/backend/model_management/model_cache.py
index 419c2c8ac2..68ebff7862 100644
--- a/invokeai/backend/model_management/model_cache.py
+++ b/invokeai/backend/model_management/model_cache.py
@@ -272,7 +272,7 @@ class ModelCache(object):
         source_device = cache_entry.model.device
         # Note: We compare device types only so that 'cuda' == 'cuda:0'. This would need to be revised to support
         # multi-GPU.
-        if source_device.type == target_device.type:
+        if torch.device(source_device).type == torch.device(target_device).type:
             return

         start_model_to_time = time.time()
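
For context, a minimal standalone sketch (assuming PyTorch is installed; the variable values below are hypothetical, not taken from InvokeAI) of why coercing both sides through `torch.device()` helps: a plain device string such as `'cuda'` has no `.type` attribute, while `torch.device()` accepts either a string or an existing `torch.device` instance, so the comparison becomes safe regardless of which form the caller passes.

```python
import torch

# Hypothetical inputs: one side a torch.device, the other a raw string.
source_device = torch.device("cuda:0")
target_device = "cuda"  # a string has no `.type`, so the old code would raise AttributeError here

# torch.device() is idempotent on torch.device instances and parses strings,
# so both sides normalize to comparable device objects.
assert torch.device(source_device).type == torch.device(target_device).type  # 'cuda' == 'cuda'
```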