From 37d66488c55f7fed21b9a0a1cc2984812bd152fd Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Mon, 26 Feb 2024 17:30:37 +1100
Subject: [PATCH] chore: ruff

---
 .../model_manager/load/model_cache/model_cache_default.py       | 2 +-
 invokeai/backend/model_manager/load/model_cache/model_locker.py | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
index e1c5e743c1..2933b169f6 100644
--- a/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
+++ b/invokeai/backend/model_manager/load/model_cache/model_cache_default.py
@@ -418,7 +418,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
     def _check_free_vram(self, target_device: torch.device, needed_size: int) -> None:
         if target_device.type != "cuda":
             return
-        vram_device = ( # mem_get_info() needs an indexed device
+        vram_device = (  # mem_get_info() needs an indexed device
             target_device if target_device.index is not None else torch.device(str(target_device), index=0)
         )
         free_mem, _ = torch.cuda.mem_get_info(torch.device(vram_device))
diff --git a/invokeai/backend/model_manager/load/model_cache/model_locker.py b/invokeai/backend/model_manager/load/model_cache/model_locker.py
index 3651590cec..81dca346e5 100644
--- a/invokeai/backend/model_manager/load/model_cache/model_locker.py
+++ b/invokeai/backend/model_manager/load/model_cache/model_locker.py
@@ -3,7 +3,9 @@
 Base class and implementation of a class that moves models in and out of VRAM.
 """
 
 import torch
+
 from invokeai.backend.model_manager import AnyModel
+
 from .model_cache_base import CacheRecord, ModelCacheBase, ModelLockerBase
 
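
Note for reviewers (not part of the patch itself): the comment preserved by the first hunk says mem_get_info() needs an indexed device, because a bare torch.device("cuda") carries index=None while torch.cuda.mem_get_info() reports free/total VRAM for one specific GPU. A minimal standalone sketch of that normalization, using a hypothetical helper name free_vram_bytes and assuming GPU 0 is the intended default:

import torch

def free_vram_bytes(target_device: torch.device) -> int:
    # Hypothetical helper (illustration only); mirrors the patched logic.
    if target_device.type != "cuda":
        return 0  # mem_get_info() is CUDA-only
    # torch.device("cuda") has index=None; pin it to GPU 0, as the patched
    # code does, before asking the driver for memory statistics.
    vram_device = (
        target_device
        if target_device.index is not None
        else torch.device(str(target_device), index=0)
    )
    free_mem, _total_mem = torch.cuda.mem_get_info(vram_device)
    return free_mem

if torch.cuda.is_available():
    print(f"free VRAM on cuda:0: {free_vram_bytes(torch.device('cuda'))} bytes")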