improve importation and conversion of legacy checkpoint files

A long-standing issue with importing legacy checkpoints (both .ckpt and
.safetensors) is that the user has to identify the correct config file,
either by providing its path or by selecting which type of model the
checkpoint is (e.g. "v1 inpainting"). In addition, some users wish to
provide custom VAEs for use with the model. Currently this is done in
the WebUI by importing the model, editing it, and then typing in the
path to the VAE.

To improve the user experience, the model manager's
`heuristic_import()` method has been enhanced as follows:

1. When initially called, the caller can pass a config file path, in
which case it will be used.

2. If no config file is provided, the method looks for a .yaml file in the
same directory as the model that bears the same basename, e.g.
```
   my-new-model.safetensors
   my-new-model.yaml
```
   The yaml file is then used as the configuration file for
   importation and conversion.

3. If no such file is found, the method opens up the checkpoint
   and probes it to determine whether it is V1, V1-inpaint or V2.
   If it is in V1 format, the appropriate v1-inference.yaml config
   file is used. Unfortunately, the two V2 variants (`V2_e` and `V2_v`)
   cannot always be distinguished by introspection.

4. If the probe algorithm is unable to determine the model type, its
   last-ditch effort is to execute an optional callback function that can
   be provided by the caller. This callback, named `config_file_callback`,
   receives the path to the legacy checkpoint and returns the path to the
   config file to use. The CLI uses this to put up a multiple-choice prompt
   to the user. The WebUI **could** use this to prompt the user to choose
   from a radio-button selection (see the sketch after this list).

5. If the config file cannot be determined, then the import is abandoned.
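
The sketch below illustrates the caller-side API described above, as a front
end might use it. It is a minimal example, not the actual CLI code: the
`choose_config_file` function, its candidate list, and the `manager` variable
(an existing `ModelManager` instance) are assumptions for illustration, while
`heuristic_import` and `config_file_callback` come from the diff below.

```
from pathlib import Path

# NOTE: `manager` is assumed to be an existing ModelManager instance, and
# `choose_config_file` is a hypothetical chooser; neither is part of this commit.

def choose_config_file(checkpoint_path: Path) -> Path:
    """Last-ditch chooser, invoked only when probing fails."""
    candidates = [
        Path("configs/stable-diffusion/v1-inference.yaml"),
        Path("configs/stable-diffusion/v1-inpainting-inference.yaml"),
        Path("configs/stable-diffusion/v2-inference.yaml"),
        Path("configs/stable-diffusion/v2-inference-v.yaml"),
    ]
    print(f"The model type of {checkpoint_path.name} could not be determined.")
    for i, candidate in enumerate(candidates, start=1):
        print(f"  {i}. {candidate.name}")
    choice = int(input("Enter the number of the config file to use: "))
    return candidates[choice - 1]

model_name = manager.heuristic_import(
    "/path/to/my-new-model.safetensors",      # URL, repo_id, or local path
    config_file_callback=choose_config_file,  # consulted only if steps 1-3 fail
)
```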

The user can attach a custom VAE to the imported and converted model
by copying the desired VAE into the same directory as the file to be
imported, and giving it the same basename. E.g.:

```
    my-new-model.safetensors
    my-new-model.vae.pt
```

For this to work, the VAE filename must end with ".vae.pt", ".vae.ckpt", or
".vae.safetensors". The indicated VAE will be converted into diffusers
format and stored with the converted model files, so the original VAE
file can be deleted after conversion.
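
For reference, the sidecar discovery amounts to checking for a like-named file
with each recognized suffix. Here is a minimal sketch that mirrors the loop in
the diff below; the model path is illustrative only:

```
from pathlib import Path

model_path = Path("/path/to/my-new-model.safetensors")  # illustrative path

# Check for a like-named VAE next to the checkpoint, trying each recognized suffix.
vae_path = None
for suffix in ["pt", "ckpt", "safetensors"]:
    candidate = model_path.with_suffix(f".vae.{suffix}")
    if candidate.exists():
        vae_path = candidate
        print(f"Using VAE file {vae_path.name}")

# With no sidecar present, the importer falls back to the default
# stabilityai/sd-vae-ft-mse autoencoder.
vae = None if vae_path else dict(repo_id="stabilityai/sd-vae-ft-mse")
```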

No facility is currently provided to swap a diffusers VAE at import
time, but this can be done after the fact using the WebUI and CLI's
model editing functions.
Author: Lincoln Stein
Date:   2023-03-27 11:27:45 -04:00
Commit: fe5d9ad171 (parent 09dfde0ba1)
2 changed files with 77 additions and 57 deletions


```
@@ -18,7 +18,7 @@ import warnings
 from enum import Enum
 from pathlib import Path
 from shutil import move, rmtree
-from typing import Any, Optional, Union
+from typing import Any, Optional, Union, Callable

 import safetensors
 import safetensors.torch
@@ -630,14 +630,13 @@ class ModelManager(object):
     def heuristic_import(
         self,
         path_url_or_repo: str,
-        convert: bool = True,
         model_name: str = None,
         description: str = None,
         model_config_file: Path = None,
         commit_to_conf: Path = None,
+        config_file_callback: Callable[[Path], Path] = None,
     ) -> str:
-        """
-        Accept a string which could be:
+        """Accept a string which could be:
           - a HF diffusers repo_id
           - a URL pointing to a legacy .ckpt or .safetensors file
           - a local path pointing to a legacy .ckpt or .safetensors file
@@ -651,16 +650,20 @@ class ModelManager(object):
         The model_name and/or description can be provided. If not, they will
         be generated automatically.

-        If convert is true, legacy models will be converted to diffusers
-        before importing.
-
         If commit_to_conf is provided, the newly loaded model will be written
         to the `models.yaml` file at the indicated path. Otherwise, the changes
         will only remain in memory.

-        The (potentially derived) name of the model is returned on success, or None
-        on failure. When multiple models are added from a directory, only the last
-        imported one is returned.
+        The routine will do its best to figure out the config file
+        needed to convert legacy checkpoint file, but if it can't it
+        will call the config_file_callback routine, if provided. The
+        callback accepts a single argument, the Path to the checkpoint
+        file, and returns a Path to the config file to use.
+
+        The (potentially derived) name of the model is returned on
+        success, or None on failure. When multiple models are added
+        from a directory, only the last imported one is returned.
         """
         model_path: Path = None
         thing = path_url_or_repo  # to save typing
@@ -707,7 +710,7 @@ class ModelManager(object):
                 Path(thing).rglob("*.safetensors")
             ):
                 if model_name := self.heuristic_import(
-                    str(m), convert, commit_to_conf=commit_to_conf
+                    str(m), commit_to_conf=commit_to_conf
                 ):
                     print(f" >> {model_name} successfully imported")
             return model_name
@@ -735,7 +738,7 @@ class ModelManager(object):
         # another round of heuristics to guess the correct config file.
         checkpoint = None
-        if model_path.suffix.endswith((".ckpt",".pt")):
+        if model_path.suffix in [".ckpt",".pt"]:
             self.scan_model(model_path,model_path)
             checkpoint = torch.load(model_path)
         else:
@@ -743,43 +746,62 @@ class ModelManager(object):
         # additional probing needed if no config file provided
         if model_config_file is None:
-            model_type = self.probe_model_type(checkpoint)
-            if model_type == SDLegacyType.V1:
-                print(" | SD-v1 model detected")
-                model_config_file = Path(
-                    Globals.root, "configs/stable-diffusion/v1-inference.yaml"
-                )
-            elif model_type == SDLegacyType.V1_INPAINT:
-                print(" | SD-v1 inpainting model detected")
-                model_config_file = Path(
-                    Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml"
-                )
-            elif model_type == SDLegacyType.V2_v:
-                print(
-                    " | SD-v2-v model detected; model will be converted to diffusers format"
-                )
-                model_config_file = Path(
-                    Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
-                )
-                convert = True
-            elif model_type == SDLegacyType.V2_e:
-                print(
-                    " | SD-v2-e model detected; model will be converted to diffusers format"
-                )
-                model_config_file = Path(
-                    Globals.root, "configs/stable-diffusion/v2-inference.yaml"
-                )
-                convert = True
-            elif model_type == SDLegacyType.V2:
-                print(
-                    f"** {thing} is a V2 checkpoint file, but its parameterization cannot be determined. Please provide configuration file path."
-                )
-                return
-            else:
-                print(
-                    f"** {thing} is a legacy checkpoint file but not a known Stable Diffusion model. Please provide configuration file path."
-                )
-                return
+            # look for a like-named .yaml file in same directory
+            if model_path.with_suffix(".yaml").exists():
+                model_config_file = model_path.with_suffix(".yaml")
+                print(f" | Using config file {model_config_file.name}")
+            else:
+                model_type = self.probe_model_type(checkpoint)
+                if model_type == SDLegacyType.V1:
+                    print(" | SD-v1 model detected")
+                    model_config_file = Path(
+                        Globals.root, "configs/stable-diffusion/v1-inference.yaml"
+                    )
+                elif model_type == SDLegacyType.V1_INPAINT:
+                    print(" | SD-v1 inpainting model detected")
+                    model_config_file = Path(
+                        Globals.root, "configs/stable-diffusion/v1-inpainting-inference.yaml"
+                    )
+                elif model_type == SDLegacyType.V2_v:
+                    print(
+                        " | SD-v2-v model detected"
+                    )
+                    model_config_file = Path(
+                        Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
+                    )
+                elif model_type == SDLegacyType.V2_e:
+                    print(
+                        " | SD-v2-e model detected"
+                    )
+                    model_config_file = Path(
+                        Globals.root, "configs/stable-diffusion/v2-inference.yaml"
+                    )
+                elif model_type == SDLegacyType.V2:
+                    print(
+                        f"** {thing} is a V2 checkpoint file, but its parameterization cannot be determined. Please provide configuration file path."
+                    )
+                    return
+                else:
+                    print(
+                        f"** {thing} is a legacy checkpoint file but not a known Stable Diffusion model. Please provide configuration file path."
+                    )
+                    return
+
+        if not model_config_file and config_file_callback:
+            model_config_file = config_file_callback(model_path)
+
+        # despite our best efforts, we could not find a model config file, so give up
+        if not model_config_file:
+            return
+
+        # look for a custom vae, a like-named file ending with .vae in the same directory
+        vae_path = None
+        for suffix in ["pt", "ckpt", "safetensors"]:
+            if (model_path.with_suffix(f".vae.{suffix}")).exists():
+                vae_path = model_path.with_suffix(f".vae.{suffix}")
+                print(f" | Using VAE file {vae_path.name}")
+        vae = None if vae_path else dict(repo_id="stabilityai/sd-vae-ft-mse")

         diffuser_path = Path(
             Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem
@@ -787,7 +809,8 @@ class ModelManager(object):
         model_name = self.convert_and_import(
             model_path,
             diffusers_path=diffuser_path,
-            vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
+            vae=vae,
+            vae_path=str(vae_path),
             model_name=model_name,
             model_description=description,
             original_config_file=model_config_file,
@@ -829,8 +852,8 @@ class ModelManager(object):
             return

         model_name = model_name or diffusers_path.name
-        model_description = model_description or f"Optimized version of {model_name}"
-        print(f">> Optimizing {model_name} (30-60s)")
+        model_description = model_description or f"Converted version of {model_name}"
+        print(f" | Converting {model_name} to diffusers (30-60s)")
         try:
             # By passing the specified VAE to the conversion function, the autoencoder
             # will be built into the model rather than tacked on afterward via the config file
@@ -848,7 +871,7 @@ class ModelManager(object):
                 scan_needed=scan_needed,
             )
             print(
-                f" | Success. Optimized model is now located at {str(diffusers_path)}"
+                f" | Success. Converted model is now located at {str(diffusers_path)}"
             )
             print(f" | Writing new config file entry for {model_name}")
             new_config = dict(
```


```
@@ -626,7 +626,7 @@ def set_default_output_dir(opt: Args, completer: Completer):
     completer.set_default_dir(opt.outdir)

-def import_model(model_path: str, gen, opt, completer, convert=False):
+def import_model(model_path: str, gen, opt, completer):
     """
     model_path can be (1) a URL to a .ckpt file; (2) a local .ckpt file path;
     (3) a huggingface repository id; or (4) a local directory containing a
@@ -657,7 +657,6 @@ def import_model(model_path: str, gen, opt, completer, convert=False):
         model_path,
         model_name=model_name,
         description=model_desc,
-        convert=convert,
     )

     if not imported_name:
@@ -666,7 +665,6 @@ def import_model(model_path: str, gen, opt, completer, convert=False):
         model_path,
         model_name=model_name,
         description=model_desc,
-        convert=convert,
         model_config_file=config_file,
     )
     if not imported_name:
@@ -757,7 +755,6 @@ def _get_model_name_and_desc(
     )
     return model_name, model_description

-
 def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
     model_name_or_path = model_name_or_path.replace("\\", "/")  # windows
     manager = gen.model_manager
@@ -788,7 +785,7 @@ def convert_model(model_name_or_path: Union[Path, str], gen, opt, completer):
         )
     else:
         try:
-            import_model(model_name_or_path, gen, opt, completer, convert=True)
+            import_model(model_name_or_path, gen, opt, completer)
         except KeyboardInterrupt:
             return
```