chore: ruff

This commit is contained in:
psychedelicious 2024-02-26 17:30:37 +11:00
parent 371e3cc260
commit 37d66488c5
2 changed files with 3 additions and 1 deletion

View File

@@ -418,7 +418,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
def _check_free_vram(self, target_device: torch.device, needed_size: int) -> None:
    if target_device.type != "cuda":
        return
    vram_device = (  # mem_get_info() needs an indexed device
        target_device if target_device.index is not None else torch.device(str(target_device), index=0)
    )
    free_mem, _ = torch.cuda.mem_get_info(torch.device(vram_device))

View File

@@ -3,7 +3,9 @@ Base class and implementation of a class that moves models in and out of VRAM.
""" """
import torch import torch
from invokeai.backend.model_manager import AnyModel from invokeai.backend.model_manager import AnyModel
from .model_cache_base import CacheRecord, ModelCacheBase, ModelLockerBase from .model_cache_base import CacheRecord, ModelCacheBase, ModelLockerBase