mirror of https://github.com/invoke-ai/InvokeAI
fix AttributeError crash when running on non-CUDA systems (#256)
* fix AttributeError crash when running on non-CUDA systems; closes issue #234 and issue #250
* although this prevents the dream.py script from crashing immediately on MPS systems, MPS support is still very much a work in progress
commit 4b560b50c2
parent 9ad79207c2
@@ -157,7 +157,9 @@ class T2I:
         self.latent_diffusion_weights = latent_diffusion_weights
         self.device = device
 
-        self.session_peakmem = torch.cuda.max_memory_allocated()
+        # for VRAM usage statistics
+        self.session_peakmem = torch.cuda.max_memory_allocated() if self.device == 'cuda' else None
+
         if seed is None:
             self.seed = self._new_seed()
         else:
@@ -363,9 +365,6 @@ class T2I:
             print('Are you sure your system has an adequate NVIDIA GPU?')
 
         toc = time.time()
-        self.session_peakmem = max(
-            self.session_peakmem, torch.cuda.max_memory_allocated()
-        )
         print('Usage stats:')
         print(
             f'   {len(results)} image(s) generated in', '%4.2fs' % (toc - tic)
@@ -374,10 +373,15 @@ class T2I:
             f'   Max VRAM used for this generation:',
             '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9),
         )
-        print(
-            f'   Max VRAM used since script start: ',
-            '%4.2fG' % (self.session_peakmem / 1e9),
-        )
+
+        if self.session_peakmem:
+            self.session_peakmem = max(
+                self.session_peakmem, torch.cuda.max_memory_allocated()
+            )
+            print(
+                f'   Max VRAM used since script start: ',
+                '%4.2fG' % (self.session_peakmem / 1e9),
+            )
         return results
 
     @torch.no_grad()
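Taken together, the hunks apply one pattern: probe the device once at construction time, store None when CUDA is absent, and guard every later torch.cuda.* call on that sentinel. Below is a minimal, self-contained sketch of the same pattern; the PeakMemTracker name and its methods are illustrative inventions for this note, not part of the InvokeAI codebase.

    import torch

    class PeakMemTracker:
        """Track peak VRAM across a session, degrading gracefully off-CUDA."""

        def __init__(self, device: str):
            self.device = device
            # torch.cuda statistics are only meaningful on CUDA devices;
            # store None instead of crashing on CPU or MPS systems.
            self.session_peakmem = (
                torch.cuda.max_memory_allocated() if device == 'cuda' else None
            )

        def report(self) -> None:
            # The commit guards with plain truthiness (if self.session_peakmem:);
            # this sketch uses an explicit None check so a legitimate 0-byte
            # first reading still gets updated and reported.
            if self.session_peakmem is not None:
                self.session_peakmem = max(
                    self.session_peakmem, torch.cuda.max_memory_allocated()
                )
                print(
                    'Max VRAM used since start: %4.2fG'
                    % (self.session_peakmem / 1e9)
                )

On a CUDA machine report() prints a running peak in gigabytes; on CPU or MPS it is a silent no-op, which is the behavior this commit restores for dream.py.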