fix broken --ckpt_convert option
- Not sure why, but at some point --ckpt_convert (which converts legacy checkpoints into diffusers in memory) stopped working due to float16/float32 issues.
- This commit repairs the problem.
- Also removed some debugging messages I found in passing.
parent 7beebc3659
commit a4c0dfb33c
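For reference, a minimal sketch of how the repaired conversion path is invoked once this commit is applied. The function name, the import path, and the new `precision` parameter come from the diff below; the checkpoint and config paths are hypothetical placeholders:

    import torch
    from ldm.invoke.ckpt_to_diffuser import load_pipeline_from_original_stable_diffusion_ckpt

    # Convert a legacy .ckpt to a diffusers pipeline in memory, at half precision.
    pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
        checkpoint_path='models/ldm/stable-diffusion-v1/model.ckpt',        # hypothetical path
        original_config_file='configs/stable-diffusion/v1-inference.yaml',  # hypothetical path
        return_generator_pipeline=True,
        precision=torch.float16,  # new parameter introduced by this commit
    )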
@@ -803,6 +803,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     extract_ema:bool=True,
     upcast_attn:bool=False,
     vae:AutoencoderKL=None,
+    precision:torch.dtype=torch.float32,
     return_generator_pipeline:bool=False,
 )->Union[StableDiffusionPipeline,StableDiffusionGeneratorPipeline]:
     '''
@@ -828,6 +829,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
         checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights
         or not. Defaults to `False`. Pass `True` to extract the EMA weights. EMA weights usually yield higher
         quality images for inference. Non-EMA weights are usually better to continue fine-tuning.
+    :param precision: precision to use - torch.float16, torch.float32 or torch.autocast
     :param upcast_attention: Whether the attention computation should always be upcasted. This is necessary when
         running stable diffusion 2.1.
     '''
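As a conceptual aside on the `extract_ema` parameter documented above: checkpoints trained with EMA carry a second, smoothed copy of the model weights alongside the live ones. A rough illustration of the choice, assuming a hypothetical `model_ema.` key prefix (real SD checkpoints flatten and mangle these names differently):

    # Conceptual sketch only; not how the converter actually parses keys.
    def select_weights(state_dict: dict, extract_ema: bool) -> dict:
        ema = {k: v for k, v in state_dict.items() if k.startswith('model_ema.')}
        if extract_ema and ema:
            return ema  # smoothed weights: usually higher quality for inference
        # live weights: usually the better starting point for fine-tuning
        return {k: v for k, v in state_dict.items() if not k.startswith('model_ema.')}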
@@ -837,7 +839,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     verbosity = dlogging.get_verbosity()
     dlogging.set_verbosity_error()

-    checkpoint = load_file(checkpoint_path) if Path(checkpoint_path).suffix == '.safetensors' else torch.load(checkpoint_path)
+    checkpoint = load_file(checkpoint_path,device='cpu') if Path(checkpoint_path).suffix == '.safetensors' else torch.load(checkpoint_path,map_location='cpu')
     cache_dir = global_cache_dir('hub')
     pipeline_class = StableDiffusionGeneratorPipeline if return_generator_pipeline else StableDiffusionPipeline

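One subtlety in the hunk above: the two loaders spell "load to CPU" differently. `safetensors.torch.load_file` takes a `device=` argument, while `torch.load` uses `map_location=` for the same purpose. A self-contained sketch of the same branch:

    from pathlib import Path
    import torch
    from safetensors.torch import load_file

    def load_checkpoint_on_cpu(checkpoint_path: str) -> dict:
        # Keep the legacy checkpoint on the CPU so the float16/float32
        # cast later in the conversion is applied explicitly.
        if Path(checkpoint_path).suffix == '.safetensors':
            return load_file(checkpoint_path, device='cpu')
        return torch.load(checkpoint_path, map_location='cpu')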
@@ -988,12 +990,12 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
         safety_checker = StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker',cache_dir=global_cache_dir("hub"))
         feature_extractor = AutoFeatureExtractor.from_pretrained("CompVis/stable-diffusion-safety-checker",cache_dir=cache_dir)
         pipe = pipeline_class(
-            vae=vae,
-            text_encoder=text_model,
+            vae=vae.to(precision),
+            text_encoder=text_model.to(precision),
             tokenizer=tokenizer,
-            unet=unet,
+            unet=unet.to(precision),
             scheduler=scheduler,
-            safety_checker=safety_checker,
+            safety_checker=safety_checker.to(precision),
             feature_extractor=feature_extractor,
         )
     else:
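The `.to(precision)` casts above are the heart of the fix: every learned submodule must share one dtype, or the first matmul between a parameter and an activation of different dtypes fails at inference time. A small illustration of that failure class (using float64/float32 to stand in for float16/float32, since CPU support for half-precision matmul varies by PyTorch version):

    import torch

    linear = torch.nn.Linear(4, 4)              # parameters default to float32
    x = torch.randn(1, 4, dtype=torch.float64)  # input in a different dtype
    try:
        linear(x)
    except RuntimeError as err:
        print(f'dtype mismatch: {err}')         # mat1 and mat2 must have the same dtype
    linear.to(torch.float64)                    # the fix: cast the module to match
    print(linear(x).dtype)                      # torch.float64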
@@ -336,7 +336,6 @@ class Generator:
         if self.caution_img:
             return self.caution_img
         path = Path(web_assets.__path__[0]) / CAUTION_IMG
-        print(f'DEBUG: path to caution = {path}')
         caution = Image.open(path)
         self.caution_img = caution.resize((caution.width // 2, caution.height //2))
         return self.caution_img
@@ -40,8 +40,6 @@ class Omnibus(Img2Img,Txt2Img):
         self.perlin = perlin
         num_samples = 1

-        print('DEBUG: IN OMNIBUS')
-
         sampler.make_schedule(
             ddim_num_steps=steps, ddim_eta=ddim_eta, verbose=False
         )
@@ -385,19 +385,25 @@ class ModelManager(object):
         from ldm.invoke.ckpt_to_diffuser import (
             load_pipeline_from_original_stable_diffusion_ckpt,
         )
+        self.offload_model(self.current_model)
         if vae_config := self._choose_diffusers_vae(model_name):
             vae = self._load_vae(vae_config)
+        if self._has_cuda():
+            torch.cuda.empty_cache()
         pipeline = load_pipeline_from_original_stable_diffusion_ckpt(
             checkpoint_path=weights,
             original_config_file=config,
             vae=vae,
             return_generator_pipeline=True,
+            precision=torch.float16 if self.precision=='float16' else torch.float32,
         )
+        if self.sequential_offload:
+            pipeline.enable_offload_submodels(self.device)
+        else:
+            pipeline.to(self.device)

         return (
-            pipeline.to(self.device).to(
-                torch.float16 if self.precision == "float16" else torch.float32
-            ),
+            pipeline,
             width,
             height,
             "NOHASH",
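The inline ternary above is how the manager's string-valued precision setting becomes a torch dtype at conversion time, rather than recasting the finished pipeline afterward. Factored out as a sketch (the helper name is illustrative, not part of the codebase):

    import torch

    def dtype_from_precision_setting(precision: str) -> torch.dtype:
        # 'float16' mirrors the manager's self.precision value; anything
        # else falls back to full precision, matching the ternary above.
        return torch.float16 if precision == 'float16' else torch.float32

    assert dtype_from_precision_setting('float16') is torch.float16
    assert dtype_from_precision_setting('float32') is torch.float32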
@@ -421,7 +421,6 @@ def do_front_end(args: Namespace):
     save_args(args)

     try:
-        print(f"DEBUG: args = {args}")
         do_textual_inversion_training(**args)
         copy_to_embeddings_folder(args)
     except Exception as e: