From b7ab025f406cf6ae30beab5068e322d0c2d672d9 Mon Sep 17 00:00:00 2001
From: Jonathan <34005131+JPPhoto@users.noreply.github.com>
Date: Sun, 5 Feb 2023 23:14:35 -0600
Subject: [PATCH] Update base.py (#2543)

Free up CUDA cache right after each image is generated. VRAM usage drops
down to pre-generation levels.
---
 ldm/invoke/generator/base.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/ldm/invoke/generator/base.py b/ldm/invoke/generator/base.py
index da2ade2f0c..f30ab256ae 100644
--- a/ldm/invoke/generator/base.py
+++ b/ldm/invoke/generator/base.py
@@ -122,6 +122,10 @@ class Generator:
                 seed = self.new_seed()
 
+        # Free up memory from the last generation.
+        if self.model.device.type == 'cuda':
+            torch.cuda.empty_cache()
+
         return results
 
     def sample_to_image(self,samples)->Image.Image:
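
Below is a minimal standalone sketch of the technique this patch applies: after a
generation loop finishes, ask PyTorch's CUDA caching allocator to release unused
cached memory back to the driver. It assumes only PyTorch; the toy model and the
generate() loop are illustrative stand-ins, not InvokeAI's actual Generator API.

import torch

def generate(model: torch.nn.Module, batches: int) -> list:
    # Run a few inference passes and collect results on the CPU,
    # so no references keep the intermediate GPU tensors alive.
    device = next(model.parameters()).device
    results = []
    with torch.no_grad():
        for _ in range(batches):
            x = torch.randn(1, 64, device=device)
            results.append(model(x).cpu())
    # empty_cache() releases memory the caching allocator holds but is
    # no longer using; tensors still referenced are unaffected. This is
    # why VRAM drops back to pre-generation levels only once the loop's
    # outputs have been moved off the GPU.
    if device.type == 'cuda':
        torch.cuda.empty_cache()
    return results

if __name__ == '__main__':
    net = torch.nn.Linear(64, 64)
    if torch.cuda.is_available():
        net = net.cuda()
    outs = generate(net, batches=4)
    reserved = torch.cuda.memory_reserved() if torch.cuda.is_available() else 0
    print(len(outs), 'results; VRAM reserved by allocator:', reserved)

Note that torch.cuda.empty_cache() does not free live tensors, so calling it mid-loop
would accomplish little; placing it after the results are collected, as the patch does
before `return results`, is what lets the allocator hand the memory back.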