Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
feat(mm): include needed vs free in OOM
Gives us a bit more visibility into these errors, which seem to be popping up more frequently with the new model manager (MM).
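For context, the pattern this commit lands on is a pre-flight check against torch.cuda.mem_get_info() before a model is moved onto the GPU, raising an OutOfMemoryError that reports both sides of the shortfall. A minimal standalone sketch of that pattern (the helper name assert_vram_available and the inlined GIG constant are illustrative stand-ins, not the cache's actual API):

import torch

GIG = 2**30  # bytes per GiB, standing in for the GIG constant used in the cache

def assert_vram_available(needed_size: int, device: torch.device) -> None:
    # Pre-flight check: refuse to load a model that cannot fit in free VRAM.
    if device.type != "cuda":
        return  # mem_get_info() is CUDA-only
    # mem_get_info() needs an indexed device, e.g. cuda:0 rather than bare cuda.
    indexed = device if device.index is not None else torch.device(str(device), index=0)
    free_mem, _total = torch.cuda.mem_get_info(indexed)
    if needed_size > free_mem:
        needed_gb = round(needed_size / GIG, 2)
        free_gb = round(free_mem / GIG, 2)
        raise torch.cuda.OutOfMemoryError(
            f"Insufficient VRAM to load model, requested {needed_gb}GB but only had {free_gb}GB free"
        )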
parent 7da04b8333
commit 85f53f94f8
@@ -429,4 +429,8 @@ class ModelCache(ModelCacheBase[AnyModel]):
         )
         free_mem, _ = torch.cuda.mem_get_info(torch.device(vram_device))
         if needed_size > free_mem:
-            raise torch.cuda.OutOfMemoryError
+            needed_gb = round(needed_size / GIG, 2)
+            free_gb = round(free_mem / GIG, 2)
+            raise torch.cuda.OutOfMemoryError(
+                f"Insufficient VRAM to load model, requested {needed_gb}GB but only had {free_gb}GB free"
+            )
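Because torch.cuda.OutOfMemoryError subclasses RuntimeError, callers can handle this pre-flight failure the same way as a real allocator OOM, but with a message that names the exact shortfall instead of CUDA's generic allocation error. A hypothetical usage of the sketch above:

try:
    assert_vram_available(needed_size=6 * GIG, device=torch.device("cuda"))
except torch.cuda.OutOfMemoryError as err:
    # e.g. "Insufficient VRAM to load model, requested 6.0GB but only had 4.25GB free"
    print(err)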