Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
improve CUDA VRAM monitoring
Extra check that device.type == 'cuda' before getting VRAM stats
This commit is contained in:
  parent bfb2781279
  commit 839e30e4b8
@@ -357,12 +357,14 @@ class Generate:
             print(
                 f'>> {len(results)} image(s) generated in', '%4.2fs' % (toc - tic)
             )
-            print(
-                f'>> Max VRAM used for this generation:',
-                '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9),
-            )
-
-            if self.session_peakmem:
-                self.session_peakmem = max(
-                    self.session_peakmem, torch.cuda.max_memory_allocated()
-                )
+            if torch.cuda.is_available() and self.device.type == 'cuda':
+                print(
+                    f'>> Max VRAM used for this generation:',
+                    '%4.2fG.' % (torch.cuda.max_memory_allocated() / 1e9),
+                    'Current VRAM utilization:'
+                    '%4.2fG' % (torch.cuda.memory_allocated() / 1e9),
+                )
+
+                self.session_peakmem = max(
+                    self.session_peakmem, torch.cuda.max_memory_allocated()
+                )
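The guard added above generalizes: query the torch.cuda memory counters only when the active device really is CUDA, since a CUDA-capable install can still be running the model on CPU or MPS. Below is a minimal, self-contained sketch of the same pattern; the report_vram helper and its session_peak argument are inventions for this example, not InvokeAI API:

    import torch

    def report_vram(device: torch.device, session_peak: int = 0) -> int:
        # Only CUDA devices expose these allocator counters, so gate on both
        # CUDA availability and the device the model is actually running on.
        if torch.cuda.is_available() and device.type == 'cuda':
            print(
                '>> Max VRAM used for this generation:',
                '%4.2fG.' % (torch.cuda.max_memory_allocated() / 1e9),
                'Current VRAM utilization:'
                '%4.2fG' % (torch.cuda.memory_allocated() / 1e9),
            )
            # Session-wide high-water mark across all generations so far.
            session_peak = max(session_peak, torch.cuda.max_memory_allocated())
        return session_peak

Note that torch.cuda.max_memory_allocated() already reports a peak since process start (or since the last torch.cuda.reset_peak_memory_stats() call), so a separate session peak only adds information if the per-generation counter gets reset between runs.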
@@ -130,7 +130,10 @@ def main_loop(t2i, outdir, prompt_as_dir, parser, infile):
             command = get_next_command(infile)
         except EOFError:
             done = True
-            break
+            continue
+        except KeyboardInterrupt:
+            done = True
+            continue
 
         # skip empty lines
         if not command.strip():
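Two things change in this hunk: end-of-input now sets the done flag and falls back to the while condition via continue instead of break-ing out directly, and Ctrl-C (KeyboardInterrupt) is caught and routed through the same exit path rather than propagating as a traceback. Here is a runnable sketch of that control flow, with a simplified stand-in for the real get_next_command:

    def get_next_command(infile):
        # Simplified stand-in: read from a batch file when one is given,
        # otherwise prompt interactively (input() raises EOFError on Ctrl-D
        # and KeyboardInterrupt on Ctrl-C).
        if infile is None:
            return input('dream> ')
        line = infile.readline()
        if not line:
            raise EOFError
        return line.rstrip()

    def main_loop(infile=None):
        done = False
        while not done:
            try:
                command = get_next_command(infile)
            except EOFError:
                done = True
                continue
            except KeyboardInterrupt:
                # Same clean exit as EOF: set the flag and let the
                # `while not done` test terminate the loop.
                done = True
                continue

            # skip empty lines
            if not command.strip():
                continue

            print(f'>> would generate: {command}')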