Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
aider: Fixed CPU memory issues by deleting large objects and forcing garbage collection.
# Aider chat conversation:

USER: `invokeai/backend/model_manager/convert_ckpt_to_diffusers.py` I'm having issues with CPU memory, not GPU memory.

ASSISTANT: To address CPU memory issues, we can make the following changes:

1. Use `del` to explicitly delete large objects when they are no longer needed.
2. Use `gc.collect()` to force garbage collection after deleting large objects.

Here are the *SEARCH/REPLACE* blocks:

invokeai/backend/model_manager/convert_ckpt_to_diffusers.py
```python
<<<<<<< SEARCH
    vae = AutoencoderKL(**vae_config)
    with torch.no_grad():
        vae.load_state_dict(converted_vae_checkpoint)
        vae.to(precision)
    torch.cuda.empty_cache()
=======
    vae = AutoencoderKL(**vae_config)
    with torch.no_grad():
        vae.load_state_dict(converted_vae_checkpoint)
        del converted_vae_checkpoint  # Free memory
        import gc
        gc.collect()
        vae.to(precision)
>>>>>>> REPLACE
```

invokeai/backend/model_manager/convert_ckpt_to_diffusers.py
```python
<<<<<<< SEARCH
    pipe = download_from_original_stable_diffusion_ckpt(Path(checkpoint_path).as_posix(), **kwargs)
    with torch.no_grad():
        pipe = pipe.to(precision)
    torch.cuda.empty_cache()
=======
    pipe = download_from_original_stable_diffusion_ckpt(Path(checkpoint_path).as_posix(), **kwargs)
    with torch.no_grad():
        del kwargs  # Free memory
        import gc
        gc.collect()
        pipe = pipe.to(precision)
>>>>>>> REPLACE
```

invokeai/backend/model_manager/convert_ckpt_to_diffusers.py
```python
<<<<<<< SEARCH
    pipe = download_controlnet_from_original_ckpt(checkpoint_path.as_posix(), **kwargs)
    with torch.no_grad():
        pipe = pipe.to(precision)
    torch.cuda.empty_cache()
=======
    pipe = download_controlnet_from_original_ckpt(checkpoint_path.as_posix(), **kwargs)
    with torch.no_grad():
        del kwargs  # Free memory
        import gc
        gc.collect()
        pipe = pipe.to(precision)
>>>>>>> REPLACE
```
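The pattern the assistant applies is sketched below in isolation. This is a minimal illustration assuming `torch` and `diffusers` are installed; `load_vae_eagerly` and its arguments are hypothetical names for this sketch, not InvokeAI code.

```python
# Minimal illustration (not InvokeAI code): free the CPU-side checkpoint
# as soon as its tensors have been copied into the module.
import gc

import torch
from diffusers import AutoencoderKL

def load_vae_eagerly(vae_config: dict, ckpt_path: str, precision: torch.dtype) -> AutoencoderKL:
    # The loaded dict holds a full CPU copy of the weights.
    state_dict = torch.load(ckpt_path, map_location="cpu")
    vae = AutoencoderKL(**vae_config)
    with torch.no_grad():
        # load_state_dict copies the values into vae's parameters, leaving
        # state_dict as a redundant second copy of every tensor.
        vae.load_state_dict(state_dict)
    del state_dict  # drop the last reference to the redundant copy
    gc.collect()    # force a collection pass so the memory is reclaimed promptly
    return vae.to(precision)
```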
This commit is contained in:
parent 46950a9bd0
commit e68c49167a
```diff
@@ -32,8 +32,10 @@ def convert_ldm_vae_to_diffusers(
     vae = AutoencoderKL(**vae_config)
     with torch.no_grad():
         vae.load_state_dict(converted_vae_checkpoint)
+        del converted_vae_checkpoint  # Free memory
+        import gc
+        gc.collect()
         vae.to(precision)
-    torch.cuda.empty_cache()

     if dump_path:
         vae.save_pretrained(dump_path, safe_serialization=True)
```
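Note that the hunk above also drops `torch.cuda.empty_cache()`, which releases only cached GPU allocations and does nothing for CPU usage. To check that an eager `del` actually lowers resident CPU memory, one rough probe is to sample the process RSS around a comparable allocate-and-delete cycle. The sketch below is not part of the commit and assumes `psutil` is installed; exact numbers depend on the platform allocator returning pages to the OS.

```python
# Rough probe: does deleting the last reference actually lower RSS?
import gc

import psutil
import torch

def rss_mb() -> float:
    # Resident set size of the current process, in megabytes.
    return psutil.Process().memory_info().rss / 1e6

before = rss_mb()
state_dict = {"weight": torch.zeros(64, 1024, 1024)}  # ~256 MiB of float32 on the CPU
loaded = rss_mb()
del state_dict  # drop the only reference, as the hunk above does
gc.collect()
print(f"RSS: {before:.0f} MB -> {loaded:.0f} MB -> {rss_mb():.0f} MB")
```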
```diff
@@ -55,8 +57,10 @@ def convert_ckpt_to_diffusers(
     """
     pipe = download_from_original_stable_diffusion_ckpt(Path(checkpoint_path).as_posix(), **kwargs)
     with torch.no_grad():
+        del kwargs  # Free memory
+        import gc
+        gc.collect()
         pipe = pipe.to(precision)
-    torch.cuda.empty_cache()

     # TO DO: save correct repo variant
     if dump_path:
```
```diff
@@ -80,8 +84,10 @@ def convert_controlnet_to_diffusers(
     """
     pipe = download_controlnet_from_original_ckpt(checkpoint_path.as_posix(), **kwargs)
     with torch.no_grad():
+        del kwargs  # Free memory
+        import gc
+        gc.collect()
         pipe = pipe.to(precision)
-    torch.cuda.empty_cache()

     # TO DO: save correct repo variant
     if dump_path:
```
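A caveat on the two `del kwargs` hunks: `del` removes only the current frame's name binding, so the underlying objects are reclaimed only if no other reference survives. A minimal demonstration, independent of this commit:

```python
# del removes a name binding, not the object itself: the caller's
# reference keeps the buffer alive regardless of the callee's del.
import gc

def consume(**kwargs):
    del kwargs  # drops only this frame's reference
    gc.collect()

buf = bytearray(100 * 1024 * 1024)  # ~100 MB, still bound to `buf` here
consume(payload=buf)
print(len(buf))  # 104857600 -- the buffer outlives the callee's del
```

In the conversion functions above, the saving therefore depends on whether `kwargs` held the last remaining reference to anything large.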