improve CUDA VRAM monitoring

Add an extra check that the device is CUDA before getting VRAM stats.
This commit is contained in:
Lincoln Stein 2022-09-11 10:02:44 -04:00
parent bfb2781279
commit 839e30e4b8
2 changed files with 12 additions and 7 deletions

View File

@@ -357,12 +357,14 @@ class Generate:
             print(
                 f'>> {len(results)} image(s) generated in', '%4.2fs' % (toc - tic)
             )
-            print(
-                f'>> Max VRAM used for this generation:',
-                '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9),
-            )
-            self.session_peakmem = max(
-                self.session_peakmem, torch.cuda.max_memory_allocated()
-            )
+            if torch.cuda.is_available() and self.device.type == 'cuda':
+                print(
+                    f'>> Max VRAM used for this generation:',
+                    '%4.2fG.' % (torch.cuda.max_memory_allocated() / 1e9),
+                    'Current VRAM utilization:'
+                    '%4.2fG' % (torch.cuda.memory_allocated() / 1e9),
+                )
+            if self.session_peakmem:
+                self.session_peakmem = max(
+                    self.session_peakmem, torch.cuda.max_memory_allocated()
+                )

View File

@@ -130,7 +130,10 @@ def main_loop(t2i, outdir, prompt_as_dir, parser, infile):
         command = get_next_command(infile)
     except EOFError:
         done = True
-        break
+        continue
+    except KeyboardInterrupt:
+        done = True
+        continue

     # skip empty lines
     if not command.strip():