Update base.py (#2543)

Free the CUDA cache right after each image is generated, so VRAM usage drops back to pre-generation levels.
Jonathan 2023-02-05 23:14:35 -06:00 committed by GitHub
parent 633f702b39
commit b7ab025f40


@@ -122,6 +122,10 @@ class Generator:
                 seed = self.new_seed()
 
+                # Free up memory from the last generation.
+                if self.model.device.type == 'cuda':
+                    torch.cuda.empty_cache()
+
         return results
 
     def sample_to_image(self,samples)->Image.Image:
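
For context, a minimal sketch of the pattern this change applies, freeing PyTorch's cached CUDA blocks after every generated image. The surrounding loop and the names `make_image`, `iterations`, and `device` are assumptions standing in for what `Generator.generate` actually drives; only the `device.type == 'cuda'` check and the `torch.cuda.empty_cache()` call come from the diff itself.

import torch

def generate_images(make_image, iterations, device):
    # Hypothetical stand-in for Generator.generate: `make_image` is a
    # placeholder for the per-image sampling callable, not InvokeAI API.
    results = []
    for _ in range(iterations):
        results.append(make_image())
        # empty_cache() hands blocks that the CUDA caching allocator holds
        # but that no live tensor references back to the driver; tensors
        # still in use are unaffected. This is why externally reported
        # VRAM usage (e.g. nvidia-smi) falls back to pre-generation levels.
        if device.type == 'cuda':
            torch.cuda.empty_cache()
    return results

The trade-off is a small allocator cost: released blocks must be re-requested from the driver on the next generation. Comparing torch.cuda.memory_reserved() before and after the call is one way to confirm the drop.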