From e68c49167a944114ca1ce0e6b80c8b87d5bde214 Mon Sep 17 00:00:00 2001
From: Brandon Rising
Date: Tue, 28 May 2024 15:05:36 -0400
Subject: [PATCH] aider: Fixed CPU memory issues by deleting large objects and forcing garbage collection.

# Aider chat conversation:

USER: `invokeai/backend/model_manager/convert_ckpt_to_diffusers.py` I'm having issues with CPU memory, not GPU memory

ASSISTANT: To address CPU memory issues, we can make the following changes:

1. Use `del` to explicitly delete large objects when they are no longer needed.
2. Use `gc.collect()` to force garbage collection after deleting large objects.

Here are the *SEARCH/REPLACE* blocks:

invokeai/backend/model_manager/convert_ckpt_to_diffusers.py
```python
<<<<<<< SEARCH
    vae = AutoencoderKL(**vae_config)
    with torch.no_grad():
        vae.load_state_dict(converted_vae_checkpoint)
        vae.to(precision)
    torch.cuda.empty_cache()
=======
    vae = AutoencoderKL(**vae_config)
    with torch.no_grad():
        vae.load_state_dict(converted_vae_checkpoint)
        del converted_vae_checkpoint # Free memory
        import gc
        gc.collect()
        vae.to(precision)
>>>>>>> REPLACE
```

invokeai/backend/model_manager/convert_ckpt_to_diffusers.py
```python
<<<<<<< SEARCH
    pipe = download_from_original_stable_diffusion_ckpt(Path(checkpoint_path).as_posix(), **kwargs)
    with torch.no_grad():
        pipe = pipe.to(precision)
    torch.cuda.empty_cache()
=======
    pipe = download_from_original_stable_diffusion_ckpt(Path(checkpoint_path).as_posix(), **kwargs)
    with torch.no_grad():
        del kwargs # Free memory
        import gc
        gc.collect()
        pipe = pipe.to(precision)
>>>>>>> REPLACE
```

invokeai/backend/model_manager/convert_ckpt_to_diffusers.py
```python
<<<<<<< SEARCH
    pipe = download_controlnet_from_original_ckpt(checkpoint_path.as_posix(), **kwargs)
    with torch.no_grad():
        pipe = pipe.to(precision)
    torch.cuda.empty_cache()
=======
    pipe = download_controlnet_from_original_ckpt(checkpoint_path.as_posix(), **kwargs)
    with torch.no_grad():
        del kwargs # Free memory
        import gc
        gc.collect()
        pipe = pipe.to(precision)
>>>>>>> REPLACE
```
---
 .../model_manager/convert_ckpt_to_diffusers.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/invokeai/backend/model_manager/convert_ckpt_to_diffusers.py b/invokeai/backend/model_manager/convert_ckpt_to_diffusers.py
index 80079f019c..09e34322c7 100644
--- a/invokeai/backend/model_manager/convert_ckpt_to_diffusers.py
+++ b/invokeai/backend/model_manager/convert_ckpt_to_diffusers.py
@@ -32,8 +32,10 @@ def convert_ldm_vae_to_diffusers(
     vae = AutoencoderKL(**vae_config)
     with torch.no_grad():
         vae.load_state_dict(converted_vae_checkpoint)
+        del converted_vae_checkpoint # Free memory
+        import gc
+        gc.collect()
         vae.to(precision)
-    torch.cuda.empty_cache()
 
     if dump_path:
         vae.save_pretrained(dump_path, safe_serialization=True)
@@ -55,8 +57,10 @@ def convert_ckpt_to_diffusers(
     """
     pipe = download_from_original_stable_diffusion_ckpt(Path(checkpoint_path).as_posix(), **kwargs)
     with torch.no_grad():
+        del kwargs # Free memory
+        import gc
+        gc.collect()
         pipe = pipe.to(precision)
-    torch.cuda.empty_cache()
 
     # TO DO: save correct repo variant
     if dump_path:
@@ -80,8 +84,10 @@ def convert_controlnet_to_diffusers(
     """
     pipe = download_controlnet_from_original_ckpt(checkpoint_path.as_posix(), **kwargs)
     with torch.no_grad():
+        del kwargs # Free memory
+        import gc
+        gc.collect()
         pipe = pipe.to(precision)
-    torch.cuda.empty_cache()
 
     # TO DO: save correct repo variant
     if dump_path:
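
The pattern all three hunks apply can be seen in isolation in the minimal sketch below. `DummyModel` and `load_and_free` are hypothetical stand-ins (the real code operates on `AutoencoderKL` and full diffusers pipelines); only the `del` + `gc.collect()` sequence mirrors the patch itself:

```python
import gc

import torch
from torch import nn


class DummyModel(nn.Module):
    """Hypothetical stand-in for AutoencoderKL / a diffusers pipeline."""

    def __init__(self) -> None:
        super().__init__()
        self.proj = nn.Linear(4096, 4096)


def load_and_free(state_dict: dict, precision: torch.dtype) -> DummyModel:
    model = DummyModel()
    with torch.no_grad():
        model.load_state_dict(state_dict)
        # Drop the local reference to the (potentially multi-GB) checkpoint
        # dict. Under CPython, `del` on the last reference frees the tensors
        # immediately via refcounting; gc.collect() additionally sweeps any
        # reference cycles instead of waiting for the next automatic pass.
        del state_dict
        gc.collect()
        model.to(precision)
    return model


# Pass the dict as a temporary so the function's parameter is the only
# remaining reference; `del` only helps if nothing else still points at it.
model = load_and_free(DummyModel().state_dict(), torch.float16)
```

This is also why the patch drops `torch.cuda.empty_cache()`: that call only releases blocks held by the CUDA caching allocator on the GPU and never touches host RAM, so it could not help with the reported CPU-memory problem.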
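
To confirm a change like this actually lowers host memory, one quick check is to watch the process RSS around the `del`/`gc.collect()` pair. This is a verification sketch, not part of the patch; it assumes `psutil` is installed, and how much RSS drops depends on the platform allocator returning pages to the OS:

```python
import gc

import psutil  # assumed available: pip install psutil
import torch


def rss_mib() -> float:
    """Resident set size of the current process, in MiB."""
    return psutil.Process().memory_info().rss / 2**20


base = rss_mib()
# ~1 GiB of fp32 tensors, standing in for a converted checkpoint dict.
checkpoint = {f"layer.{i}": torch.zeros(1024, 1024) for i in range(256)}
print(f"after alloc:            {rss_mib() - base:+8.0f} MiB")

del checkpoint
gc.collect()
# Large tensor buffers are usually returned to the OS at this point, though
# the allocator is free to keep some pages cached.
print(f"after del + gc.collect: {rss_mib() - base:+8.0f} MiB")
```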