Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
improve CUDA VRAM monitoring
extra check that device==cuda before getting VRAM stats
parent bfb2781279
commit 839e30e4b8
@@ -357,12 +357,14 @@ class Generate:
         print(
             f'>> {len(results)} image(s) generated in', '%4.2fs' % (toc - tic)
         )
-        print(
-            f'>> Max VRAM used for this generation:',
-            '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9),
-        )
+        if torch.cuda.is_available() and self.device.type == 'cuda':
+            print(
+                f'>> Max VRAM used for this generation:',
+                '%4.2fG.' % (torch.cuda.max_memory_allocated() / 1e9),
+                'Current VRAM utilization:'
+                '%4.2fG' % (torch.cuda.memory_allocated() / 1e9),
+            )

         if self.session_peakmem:
             self.session_peakmem = max(
                 self.session_peakmem, torch.cuda.max_memory_allocated()
             )
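For context, the pattern this hunk introduces can be sketched standalone. The sketch below is an illustration, not code from the repository: the `report_vram` helper name is invented here, and the formatting mirrors the diff. Note that the `torch.cuda.*` memory counters return bytes, hence the division by 1e9. Without the `device.type == 'cuda'` check, a CPU (or MPS) run on a machine that merely has CUDA available would print misleading, always-zero numbers.

```python
import torch

def report_vram(device: torch.device) -> None:
    # Hypothetical helper illustrating the commit's guard: query CUDA
    # memory counters only when CUDA is available AND the active device
    # is actually a CUDA device (e.g. not CPU or MPS).
    if torch.cuda.is_available() and device.type == 'cuda':
        peak_gb = torch.cuda.max_memory_allocated() / 1e9  # peak bytes -> GB
        live_gb = torch.cuda.memory_allocated() / 1e9      # live bytes -> GB
        print(
            f'>> Max VRAM used for this generation: {peak_gb:4.2f}G.',
            f'Current VRAM utilization: {live_gb:4.2f}G',
        )

# Example: report_vram(torch.device('cuda')) after a generation pass.
```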
@@ -130,8 +130,11 @@ def main_loop(t2i, outdir, prompt_as_dir, parser, infile):
             command = get_next_command(infile)
         except EOFError:
             done = True
-            break
+            continue
         except KeyboardInterrupt:
             done = True
             continue

+        # skip empty lines
+        if not command.strip():
+            continue
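The second hunk is an incidental fix to the interactive read loop: EOF now sets the `done` flag and `continue`s instead of `break`ing out, and blank input lines are skipped before any parsing. A minimal, self-contained sketch of the resulting control flow follows; the prompt string and loop body are stand-ins, not the project's actual command handling.

```python
def main_loop() -> None:
    # Minimal stand-in for the script's read loop. input() raises
    # EOFError on Ctrl-D and KeyboardInterrupt on Ctrl-C, matching the
    # cases the hunk handles; both now route through the `done` flag.
    done = False
    while not done:
        try:
            command = input('dream> ')
        except EOFError:
            done = True
            continue
        except KeyboardInterrupt:
            done = True
            continue
        # skip empty lines
        if not command.strip():
            continue
        print(f'would process: {command.strip()}')

if __name__ == '__main__':
    main_loop()
```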