diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
index e1c5e743c1..2933b169f6 100644
--- a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
+++ b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
@@ -418,7 +418,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
     def _check_free_vram(self, target_device: torch.device, needed_size: int) -> None:
         if target_device.type != "cuda":
             return
-        vram_device = ( # mem_get_info() needs an indexed device
+        vram_device = (  # mem_get_info() needs an indexed device
             target_device if target_device.index is not None else torch.device(str(target_device), index=0)
         )
         free_mem, _ = torch.cuda.mem_get_info(torch.device(vram_device))
diff --git a/invokeai/backend/model_manager/load/model_cache/model_locker.py b/invokeai/backend/model_manager/load/model_cache/model_locker.py
index 3651590cec..81dca346e5 100644
--- a/invokeai/backend/model_manager/load/model_cache/model_locker.py
+++ b/invokeai/backend/model_manager/load/model_cache/model_locker.py
@@ -3,7 +3,9 @@ Base class and implementation of a class that moves models in and out of VRAM.
 """
 
 import torch
+
 from invokeai.backend.model_manager import AnyModel
+
 from .model_cache_base import CacheRecord, ModelCacheBase, ModelLockerBase
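
For context (not part of the diff): the inline comment in `_check_free_vram()` notes that `torch.cuda.mem_get_info()` expects an explicitly indexed device. A minimal sketch of that normalization pattern is below; the helper name `free_cuda_bytes` is hypothetical and not from the InvokeAI codebase.

```python
import torch


def free_cuda_bytes(target_device: torch.device) -> int:
    """Hypothetical helper: report free VRAM (bytes) on a CUDA device.

    Mirrors the pattern in _check_free_vram(): torch.cuda.mem_get_info()
    is queried with an indexed device, so a bare "cuda" device falls back
    to index 0.
    """
    if target_device.type != "cuda":
        raise ValueError("only CUDA devices have queryable VRAM")
    # Normalize "cuda" -> "cuda:0" so mem_get_info() receives an indexed device.
    vram_device = (
        target_device if target_device.index is not None else torch.device(str(target_device), index=0)
    )
    free_mem, _total_mem = torch.cuda.mem_get_info(vram_device)
    return free_mem
```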