From 4b560b50c21dcf67872753d11410671f5b48531a Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Wed, 31 Aug 2022 16:59:27 -0400 Subject: [PATCH] fix AttributeError crash when running on non-CUDA systems (#256) * fix AttributeError crash when running on non-CUDA systems; closes issue #234 and issue #250 * although this prevents the dream.py script from crashing immediately on MPS systems, MPS support is still very much a work in progress. --- ldm/simplet2i.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/ldm/simplet2i.py b/ldm/simplet2i.py index a28670fc05..88cbb6ff78 100644 --- a/ldm/simplet2i.py +++ b/ldm/simplet2i.py @@ -157,7 +157,9 @@ class T2I: self.latent_diffusion_weights = latent_diffusion_weights self.device = device - self.session_peakmem = torch.cuda.max_memory_allocated() + # for VRAM usage statistics + self.session_peakmem = torch.cuda.max_memory_allocated() if self.device == 'cuda' else None + if seed is None: self.seed = self._new_seed() else: @@ -363,9 +365,6 @@ class T2I: print('Are you sure your system has an adequate NVIDIA GPU?') toc = time.time() - self.session_peakmem = max( - self.session_peakmem, torch.cuda.max_memory_allocated() - ) print('Usage stats:') print( f' {len(results)} image(s) generated in', '%4.2fs' % (toc - tic) ) print( f' Max VRAM used for this generation:', '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9), ) - print( - f' Max VRAM used since script start: ', - '%4.2fG' % (self.session_peakmem / 1e9), - ) + + if self.session_peakmem: + self.session_peakmem = max( + self.session_peakmem, torch.cuda.max_memory_allocated() + ) + print( + f' Max VRAM used since script start: ', + '%4.2fG' % (self.session_peakmem / 1e9), + ) return results @torch.no_grad()