improve CUDA VRAM monitoring

extra check that device==cuda before getting VRAM stats
Lincoln Stein 2022-09-11 10:02:44 -04:00
parent bfb2781279
commit 839e30e4b8
2 changed files with 12 additions and 7 deletions
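In short: the per-generation VRAM report is now wrapped in a device check, because the `torch.cuda.*` memory counters are only meaningful when the model actually lives on a CUDA device (not `cpu` or `mps`). A minimal standalone sketch of the guarded report (the `report_vram` helper name is hypothetical; the guard and format strings follow the diff):

```python
import torch

def report_vram(device: torch.device) -> None:
    """Print peak and current VRAM usage for the active CUDA device."""
    # Two-part guard, as in the diff: CUDA must be available *and* this
    # particular model must be running on a CUDA device.
    if torch.cuda.is_available() and device.type == 'cuda':
        print(
            '>> Max VRAM used for this generation:',
            '%4.2fG.' % (torch.cuda.max_memory_allocated() / 1e9),
            'Current VRAM utilization:',
            '%4.2fG' % (torch.cuda.memory_allocated() / 1e9),
        )
```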


@@ -357,12 +357,14 @@ class Generate:
         print(
             f'>> {len(results)} image(s) generated in', '%4.2fs' % (toc - tic)
         )
-        print(
-            f'>> Max VRAM used for this generation:',
-            '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9),
-        )
+        if torch.cuda.is_available() and self.device.type == 'cuda':
+            print(
+                f'>> Max VRAM used for this generation:',
+                '%4.2fG.' % (torch.cuda.max_memory_allocated() / 1e9),
+                'Current VRAM utilization:',
+                '%4.2fG' % (torch.cuda.memory_allocated() / 1e9),
+            )
         if self.session_peakmem:
             self.session_peakmem = max(
                 self.session_peakmem, torch.cuda.max_memory_allocated()
             )
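The surrounding `session_peakmem` bookkeeping keeps a running maximum across all generations in the session. A sketch of that pattern in isolation (the `SessionStats` class and the `reset_peak_memory_stats()` call are illustrative assumptions, not part of this commit):

```python
import torch

class SessionStats:
    """Hypothetical holder for the session-wide VRAM peak (illustration only)."""

    def __init__(self):
        self.session_peakmem = 0

    def begin_generation(self):
        # Assumption, not in this commit: zero the peak counter so that
        # max_memory_allocated() reflects only the upcoming generation.
        if torch.cuda.is_available():
            torch.cuda.reset_peak_memory_stats()

    def end_generation(self):
        # Fold this generation's peak into the running session maximum,
        # mirroring the diff's self.session_peakmem = max(...) update.
        if torch.cuda.is_available():
            self.session_peakmem = max(
                self.session_peakmem, torch.cuda.max_memory_allocated()
            )
```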


@@ -130,8 +130,11 @@ def main_loop(t2i, outdir, prompt_as_dir, parser, infile):
             command = get_next_command(infile)
         except EOFError:
             done = True
-            break
+            continue
+        except KeyboardInterrupt:
+            done = True
+            continue

         # skip empty lines
         if not command.strip():
             continue
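The second hunk changes the EOF handler from `break` to `continue`: since `done` is already set, `continue` re-tests the `while not done` condition, so end-of-input and the newly handled Ctrl-C both leave the loop through the same exit path. A stripped-down sketch of that loop shape (`input()` stands in for the real `get_next_command(infile)`; the rest is illustrative):

```python
def main_loop():
    """Minimal sketch of the interactive loop shape shown in the diff."""
    done = False
    while not done:
        try:
            command = input('dream> ')  # stand-in for get_next_command(infile)
        except EOFError:
            # End of input: set the flag and let the while-condition exit,
            # so EOF and Ctrl-C share a single exit path.
            done = True
            continue
        except KeyboardInterrupt:
            # Ctrl-C at the prompt: same exit path as EOF.
            done = True
            continue

        # skip empty lines
        if not command.strip():
            continue
        print(f'(would execute: {command})')

if __name__ == '__main__':
    main_loop()
```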