fix AttributeError crash when running on non-CUDA systems (#256)

* fix AttributeError crash when running on non-CUDA systems; closes issue #234 and issue #250
* although this prevents the dream.py script from crashing immediately on MPS systems, MPS support is still very much a work in progress.
This commit is contained in:
Lincoln Stein 2022-08-31 16:59:27 -04:00 committed by GitHub
parent 9ad79207c2
commit 4b560b50c2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -157,7 +157,9 @@ class T2I:
self.latent_diffusion_weights = latent_diffusion_weights
self.device = device
self.session_peakmem = torch.cuda.max_memory_allocated()
# for VRAM usage statistics
self.session_peakmem = torch.cuda.max_memory_allocated() if self.device == 'cuda' else None
if seed is None:
self.seed = self._new_seed()
else:
@ -363,9 +365,6 @@ class T2I:
print('Are you sure your system has an adequate NVIDIA GPU?')
toc = time.time()
self.session_peakmem = max(
self.session_peakmem, torch.cuda.max_memory_allocated()
)
print('Usage stats:')
print(
f' {len(results)} image(s) generated in', '%4.2fs' % (toc - tic)
@ -374,10 +373,15 @@ class T2I:
f' Max VRAM used for this generation:',
'%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9),
)
print(
f' Max VRAM used since script start: ',
'%4.2fG' % (self.session_peakmem / 1e9),
)
if self.session_peakmem:
self.session_peakmem = max(
self.session_peakmem, torch.cuda.max_memory_allocated()
)
print(
f' Max VRAM used since script start: ',
'%4.2fG' % (self.session_peakmem / 1e9),
)
return results
@torch.no_grad()