fix broken --ckpt_convert option

- not sure why, but at some point --ckpt_convert (which converts legacy checkpoints
  into diffusers in memory) stopped working due to float16/float32 issues.

- this commit repairs the problem

- also removed some debugging messages I found in passing
This commit is contained in:
Lincoln Stein
2023-02-20 01:12:02 -05:00
parent 7beebc3659
commit a4c0dfb33c
5 changed files with 17 additions and 13 deletions

View File

@ -385,19 +385,25 @@ class ModelManager(object):
from ldm.invoke.ckpt_to_diffuser import (
load_pipeline_from_original_stable_diffusion_ckpt,
)
self.offload_model(self.current_model)
if vae_config := self._choose_diffusers_vae(model_name):
vae = self._load_vae(vae_config)
if self._has_cuda():
torch.cuda.empty_cache()
pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
checkpoint_path=weights,
original_config_file=config,
vae=vae,
return_generator_pipeline=True,
precision=torch.float16 if self.precision=='float16' else torch.float32,
)
if self.sequential_offload:
pipeline.enable_offload_submodels(self.device)
else:
pipeline.to(self.device)
return (
pipeline.to(self.device).to(
torch.float16 if self.precision == "float16" else torch.float32
),
pipeline,
width,
height,
"NOHASH",