commit 37d66488c5
parent 371e3cc260

chore: ruff
@@ -418,7 +418,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
     def _check_free_vram(self, target_device: torch.device, needed_size: int) -> None:
         if target_device.type != "cuda":
             return
-        vram_device = ( # mem_get_info() needs an indexed device
+        vram_device = (  # mem_get_info() needs an indexed device
             target_device if target_device.index is not None else torch.device(str(target_device), index=0)
         )
         free_mem, _ = torch.cuda.mem_get_info(torch.device(vram_device))
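For reference, torch.cuda.mem_get_info() wraps cudaMemGetInfo and returns a (free, total) tuple in bytes; it only accepts an indexed device, which is why the code above normalizes a bare "cuda" device to index 0. A minimal self-contained sketch of the same check outside InvokeAI — the helper name has_free_vram and the 2 GiB figure are illustrative, not part of this commit:

import torch


def has_free_vram(target_device: torch.device, needed_size: int) -> bool:
    """Return True if target_device reports at least needed_size free bytes."""
    if target_device.type != "cuda":
        return True  # only CUDA devices can be queried this way
    # mem_get_info() needs an indexed device; normalize "cuda" -> "cuda:0"
    vram_device = target_device if target_device.index is not None else torch.device(str(target_device), index=0)
    free_mem, _total = torch.cuda.mem_get_info(vram_device)
    return free_mem >= needed_size


if torch.cuda.is_available():
    # e.g. require roughly 2 GiB of headroom on the default CUDA device
    print(has_free_vram(torch.device("cuda"), 2 * 1024**3))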
@@ -3,7 +3,9 @@ Base class and implementation of a class that moves models in and out of VRAM.
 """
+
 import torch
 
 from invokeai.backend.model_manager import AnyModel
+
 from .model_cache_base import CacheRecord, ModelCacheBase, ModelLockerBase
 
 
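The second hunk is ruff normalizing blank lines around the module's imports: its isort-style rule (I001) sorts imports and separates the import sections with single blank lines. Schematically — the section layout below is what the rule enforces, and the module names other than those in the hunk are illustrative:

from __future__ import annotations  # "future" section first

import os  # standard library

import torch  # third-party

from invokeai.backend.model_manager import AnyModel  # first-party

from .model_cache_base import ModelCacheBase  # local-folder (relative imports)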