enhance model_manager support for converting inpainting ckpt files
Previously, conversions of .ckpt and .safetensors files to diffusers models were failing with channel-mismatch errors. This PR corrects that:

- The model_manager convert_and_import() method now accepts the path to the checkpoint's configuration file via the parameter `original_config_file`. For inpainting files this should be set to the full path of `v1-inpainting-inference.yaml` (a usage sketch follows below).
- If no configuration file is provided in the call, the presence of an inpainting model is inferred at the `ldm.ckpt_to_diffuser.convert_ckpt_to_diffuser()` level by looking for the string "inpaint" in the path. AUTO1111 does something similar, but it is brittle and not recommended.
- This PR also changes the model manager's model_names() method to return model names in case-folded sort order.
This commit is contained in:
parent 7f41893da4
commit f92f62a91b
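As a usage sketch for the first list item above: a hypothetical call to convert_and_import() with an explicit original_config_file. Only the keyword arguments mirror the diff below; the first positional argument and the models.yaml location are assumptions, not part of this commit.

from pathlib import Path

def convert_inpainting_checkpoint(manager, ckpt_path: Path):
    # `manager` is an InvokeAI model_manager instance (assumed).
    # Passing original_config_file explicitly avoids the brittle
    # "inpaint"-in-path inference described in the commit message.
    return manager.convert_and_import(
        ckpt_path,  # assumed first positional argument
        model_name=ckpt_path.stem,
        model_description=f'Converted model {ckpt_path.stem}',
        original_config_file=Path(
            'configs', 'stable-diffusion', 'v1-inpainting-inference.yaml'
        ),
        commit_to_conf=Path('configs', 'models.yaml'),  # assumed conf location
    )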
@@ -712,10 +712,12 @@ def _get_model_name_and_desc(model_manager,completer,model_name:str='',model_description:str='')
 def optimize_model(model_name_or_path:str, gen, opt, completer):
     manager = gen.model_manager
     ckpt_path = None
+    original_config_file = None
 
     if (model_info := manager.model_info(model_name_or_path)):
         if 'weights' in model_info:
             ckpt_path = Path(model_info['weights'])
+            original_config_file = Path(model_info['config'])
             model_name = model_name_or_path
             model_description = model_info['description']
         else:
@@ -723,12 +725,18 @@ def optimize_model(model_name_or_path:str, gen, opt, completer):
             return
     elif os.path.exists(model_name_or_path):
         ckpt_path = Path(model_name_or_path)
-        model_name,model_description = _get_model_name_and_desc(
+        model_name, model_description = _get_model_name_and_desc(
            manager,
            completer,
            ckpt_path.stem,
            f'Converted model {ckpt_path.stem}'
        )
+        is_inpainting = input('Is this an inpainting model? [n] ').startswith(('y','Y'))
+        original_config_file = Path(
+            'configs',
+            'stable-diffusion',
+            'v1-inpainting-inference.yaml' if is_inpainting else 'v1-inference.yaml'
+        )
     else:
         print(f'** {model_name_or_path} is neither an existing model nor the path to a .ckpt file')
         return
@@ -736,6 +744,9 @@ def optimize_model(model_name_or_path:str, gen, opt, completer):
     if not ckpt_path.is_absolute():
         ckpt_path = Path(Globals.root,ckpt_path)
 
+    if original_config_file and not original_config_file.is_absolute():
+        original_config_file = Path(Globals.root,original_config_file)
+
     diffuser_path = Path(Globals.root, 'models',Globals.converted_ckpts_dir,model_name)
     if diffuser_path.exists():
         print(f'** {model_name_or_path} is already optimized. Will not overwrite. If this is an error, please remove the directory {diffuser_path} and try again.')
@@ -751,6 +762,7 @@ def optimize_model(model_name_or_path:str, gen, opt, completer):
         model_name=model_name,
         model_description=model_description,
         vae = vae,
+        original_config_file = original_config_file,
         commit_to_conf=opt.conf,
     )
     if not new_config:
@@ -22,7 +22,11 @@ import re
 import torch
 import warnings
 from pathlib import Path
-from ldm.invoke.globals import Globals, global_cache_dir
+from ldm.invoke.globals import (
+    Globals,
+    global_cache_dir,
+    global_config_dir,
+)
 from safetensors.torch import load_file
 from typing import Union
 
@@ -826,6 +830,8 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     :param upcast_attention: Whether the attention computation should always be upcasted. This is necessary when
         running stable diffusion 2.1.
     '''
 
+    print(f'DEBUG: original_config_file={original_config_file}')
+
     with warnings.catch_warnings():
         warnings.simplefilter('ignore')
@@ -852,13 +858,16 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
 
     if key_name in checkpoint and checkpoint[key_name].shape[-1] == 1024:
-        original_config_file = os.path.join(Globals.root,'configs','stable-diffusion','v2-inference-v.yaml')
+        original_config_file = global_config_dir() / 'stable-diffusion' / 'v2-inference-v.yaml'
 
         if global_step == 110000:
             # v2.1 needs to upcast attention
             upcast_attention = True
+    elif str(checkpoint_path).lower().find('inpaint') >= 0: # brittle - please pass original_config_file parameter!
+        print(f' | checkpoint has "inpaint" in name, assuming an inpainting model')
+        original_config_file = global_config_dir() / 'stable-diffusion' / 'v1-inpainting-inference.yaml'
     else:
-        original_config_file = os.path.join(Globals.root,'configs','stable-diffusion','v1-inference.yaml')
+        original_config_file = global_config_dir() / 'stable-diffusion' / 'v1-inference.yaml'
 
     original_config = OmegaConf.load(original_config_file)
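The string match on "inpaint" is, as its in-line comment admits, brittle. A more robust heuristic, sketched here and not part of this commit, is to inspect the first UNet convolution in the checkpoint: SD v1 inpainting models take 9 input channels (4 noise latent, 4 masked-image latent, 1 mask) instead of the usual 4.

def looks_like_inpainting_checkpoint(checkpoint: dict) -> bool:
    # First convolution of the UNet in LDM-style state dicts.
    key = 'model.diffusion_model.input_blocks.0.0.weight'
    weight = checkpoint.get(key)
    # Standard SD v1 checkpoints have shape [320, 4, 3, 3] here; inpainting
    # checkpoints concatenate mask and masked-image latents, giving 9 channels.
    return weight is not None and weight.ndim == 4 and weight.shape[1] == 9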
File diff suppressed because it is too large.
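The model_manager diff itself is not shown, but per the commit message the model_names() method now returns names in case-folded sort order. A minimal sketch of what that presumably looks like (the attribute holding the parsed models config is an assumption):

def model_names(self) -> list:
    # str.casefold sorts 'Analog-Diffusion' next to 'analog-diffusion'
    # instead of grouping all capitalized names first.
    return sorted(self.config.keys(), key=str.casefold)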