clean up diagnostic messages

Lincoln Stein
2023-02-19 19:38:29 -05:00
parent ca10d0652f
commit 5461318eda
3 changed files with 176 additions and 284 deletions


@@ -16,6 +16,7 @@ import sys
import textwrap
import time
import warnings
+from enum import Enum
from pathlib import Path
from shutil import move, rmtree
from typing import Any, Optional, Union
@@ -33,11 +34,16 @@ from picklescan.scanner import scan_file_path
from ldm.invoke.generator.diffusers_pipeline import \
StableDiffusionGeneratorPipeline
-from ldm.invoke.globals import (Globals, global_autoscan_dir, global_cache_dir,
-global_models_dir)
+from ldm.invoke.globals import (Globals, global_cache_dir)
from ldm.util import (ask_user, download_with_resume,
url_attachment_name, instantiate_from_config)
+class SDLegacyType(Enum):
+V1 = 1
+V1_INPAINT = 2
+V2 = 3
+UNKNOWN = 99
DEFAULT_MAX_MODELS = 2
VAE_TO_REPO_ID = { # hack, see note in convert_and_import()
"vae-ft-mse-840000-ema-pruned": "stabilityai/sd-vae-ft-mse",
@@ -467,19 +473,6 @@ class ModelManager(object):
for module in model.modules():
if isinstance(module, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
module._orig_padding_mode = module.padding_mode
-# usage statistics
-toc = time.time()
-print(">> Model loaded in", "%4.2fs" % (toc - tic))
-if self._has_cuda():
-print(
-">> Max VRAM used to load the model:",
-"%4.2fG" % (torch.cuda.max_memory_allocated() / 1e9),
-"\n>> Current VRAM usage:"
-"%4.2fG" % (torch.cuda.memory_allocated() / 1e9),
-)
return model, width, height, model_hash
def _load_diffusers_model(self, mconfig):
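The block deleted above reported load time and VRAM consumption after every model load. Anyone who still wants that diagnostic can reproduce it outside the manager with the same torch.cuda counters; a minimal sketch, where the wrapper name and call shape are hypothetical:

import time
import torch

def report_load_stats(load_fn, *args, **kwargs):
    # Times an arbitrary model-loading callable and reports the same
    # CUDA memory statistics the removed block printed.
    tic = time.time()
    result = load_fn(*args, **kwargs)
    print(">> Model loaded in %4.2fs" % (time.time() - tic))
    if torch.cuda.is_available():
        print(">> Max VRAM used to load the model: %4.2fG" % (torch.cuda.max_memory_allocated() / 1e9))
        print(">> Current VRAM usage: %4.2fG" % (torch.cuda.memory_allocated() / 1e9))
    return result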
@@ -611,15 +604,15 @@ class ModelManager(object):
print("### Exiting InvokeAI")
sys.exit()
else:
print(">> Model scanned ok!")
print(">> Model scanned ok")
def import_diffuser_model(
self,
repo_or_path: Union[str, Path],
model_name: str = None,
-description: str = None,
+model_description: str = None,
vae: dict = None,
commit_to_conf: Path = None,
) -> bool:
"""
Attempts to install the indicated diffuser model and returns True if successful.
@@ -657,7 +650,7 @@ class ModelManager(object):
model_name: str = None,
model_description: str = None,
commit_to_conf: Path = None,
-) -> bool:
+) -> str:
"""
Attempts to install the indicated ckpt file and returns True if successful.
@@ -674,6 +667,8 @@ class ModelManager(object):
then these will be derived from the weight file name. If you provide a commit_to_conf
path to the configuration file, then the new entry will be committed to the
models.yaml file.
+Return value is the name of the imported file, or None if an error occurred.
"""
if str(weights).startswith(("http:", "https:")):
model_name = model_name or url_attachment_name(weights)
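With the return type loosened from bool to str, failures now surface as None rather than False. A sketch of the caller-side contract, assuming `manager` is an initialized ModelManager and using a placeholder URL:

weights_url = "https://example.com/some-model.safetensors"  # placeholder
name = manager.import_ckpt_model(
    weights_url,
    config="configs/stable-diffusion/v1-inference.yaml",
)
if name is None:
    print(f"** import of {weights_url} failed")
else:
    print(f">> imported as {name}")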
@@ -682,9 +677,9 @@ class ModelManager(object):
config_path = self._resolve_path(config, "configs/stable-diffusion")
if weights_path is None or not weights_path.exists():
-return False
+return
if config_path is None or not config_path.exists():
-return False
+return
model_name = model_name or Path(weights).stem # note this gives ugly pathnames if used on a URL without a Content-Disposition header
model_description = (
@@ -703,41 +698,101 @@ class ModelManager(object):
self.add_model(model_name, new_config, True)
if commit_to_conf:
self.commit(commit_to_conf)
-return True
+return model_name
+@classmethod
+def probe_model_type(self, checkpoint: dict)->SDLegacyType:
+'''
+Given a pickle or safetensors model object, probes contents
+of the object and returns an SDLegacyType indicating its
+format. Valid return values include:
+SDLegacyType.V1
+SDLegacyType.V1_INPAINT
+SDLegacyType.V2
+UNKNOWN
+'''
+key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
+if key_name in checkpoint and checkpoint[key_name].shape[-1] == 1024:
+return SDLegacyType.V2
+try:
+state_dict = checkpoint.get('state_dict') or checkpoint
+in_channels = state_dict['model.diffusion_model.input_blocks.0.0.weight'].shape[1]
+if in_channels == 9:
+return SDLegacyType.V1_INPAINT
+elif in_channels == 4:
+return SDLegacyType.V1
+else:
+return SDLegacyType.UNKNOWN
+except KeyError:
+return SDLegacyType.UNKNOWN
def heuristic_import(
self,
path_url_or_repo: str,
convert: bool= False,
model_name: str = None,
description: str = None,
commit_to_conf: Path=None,
-):
+)->str:
'''
Accept a string which could be:
- a HF diffusers repo_id
- a URL pointing to a legacy .ckpt or .safetensors file
- a local path pointing to a legacy .ckpt or .safetensors file
- a local directory containing .ckpt and .safetensors files
- a local directory containing a diffusers model
After determining the nature of the model and downloading it
(if necessary), the file is probed to determine the correct
configuration file (if needed) and it is imported.
The model_name and/or description can be provided. If not, they will
be generated automatically.
If convert is true, legacy models will be converted to diffusers
before importing.
If commit_to_conf is provided, the newly loaded model will be written
to the `models.yaml` file at the indicated path. Otherwise, the changes
will only remain in memory.
+The (potentially derived) name of the model is returned on success, or None
+on failure. When multiple models are added from a directory, only the last
+imported one is returned.
'''
model_path: Path = None
thing = path_url_or_repo # to save typing
print(f'>> Probing {thing} for import')
if thing.startswith(('http:','https:','ftp:')):
print(f' | {thing} appears to be a URL')
model_path = self._resolve_path(thing, 'models/ldm/stable-diffusion-v1') # _resolve_path does a download if needed
elif Path(thing).is_file() and thing.endswith(('.ckpt','.safetensors')):
print(f' | {thing} appears to be a checkpoint file on disk')
model_path = self._resolve_path(thing, 'models/ldm/stable-diffusion-v1')
elif Path(thing).is_dir() and Path(thing, 'model_index.json').exists():
print(f' | {thing} appears to be a diffusers file on disk')
-model_name = self.import_diffusers_model(
+model_name = self.import_diffuser_model(
thing,
vae=dict(repo_id='stabilityai/sd-vae-ft-mse'),
model_name=model_name,
description=description,
commit_to_conf=commit_to_conf
)
elif Path(thing).is_dir():
print(f'>> {thing} appears to be a directory. Will scan for models to import')
for m in list(Path(thing).rglob('*.ckpt')) + list(Path(thing).rglob('*.safetensors')):
-self.heuristic_import(str(m), convert, commit_to_conf=commit_to_conf)
-return
+if model_name := self.heuristic_import(str(m), convert, commit_to_conf=commit_to_conf):
+print(f' >> {model_name} successfully imported')
+return model_name
elif re.match(r'^[\w.+-]+/[\w.+-]+$', thing):
print(f' | {thing} appears to be a HuggingFace diffusers repo_id')
model_name = self.import_diffuser_model(thing, commit_to_conf=commit_to_conf)
pipeline,_,_,_ = self._load_diffusers_model(self.config[model_name])
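probe_model_type (added above) replaces the inline key-shape checks with a single classifier keyed on the to_k attention-weight width (1024 for SD-v2) and the UNet's input-channel count (9 for inpainting, 4 for standard v1). A minimal sketch of exercising it against a local file, mirroring the checkpoint-loading line used later in this hunk; the path is hypothetical:

import torch
import safetensors.torch

path = "models/ldm/stable-diffusion-v1/some-model.safetensors"  # hypothetical
checkpoint = (
    safetensors.torch.load_file(path)
    if path.endswith(".safetensors")
    else torch.load(path, map_location="cpu")
)
model_type = ModelManager.probe_model_type(checkpoint)
if model_type == SDLegacyType.V2:
    print("SD-v2 checkpoint: convert to diffusers before use")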
@@ -750,68 +805,68 @@ class ModelManager(object):
return
if model_path.stem in self.config: #already imported
-print(' > Already imported. Skipping')
+print(' | Already imported. Skipping')
return
# another round of heuristics to guess the correct config file.
-model_config_file = None
checkpoint = safetensors.torch.load_file(model_path) if model_path.suffix == '.safetensors' else torch.load(model_path)
+model_type = self.probe_model_type(checkpoint)
-key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
-if key_name in checkpoint and checkpoint[key_name].shape[-1] == 1024:
-print(' > SD-v2 model detected; model will be converted to diffusers format')
+model_config_file = None
+if model_type == SDLegacyType.V1:
+print(' | SD-v1 model detected')
+model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
+elif model_type == SDLegacyType.V1_INPAINT:
+print(' | SD-v1 inpainting model detected')
+model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inpainting-inference.yaml')
+elif model_type == SDLegacyType.V2:
+print(' | SD-v2 model detected; model will be converted to diffusers format')
model_config_file = Path(Globals.root,'configs/stable-diffusion/v2-inference-v.yaml')
convert = True
-if not model_config_file: # still trying
-in_channels = None
-try:
-state_dict = checkpoint.get('state_dict') or checkpoint
-in_channels = state_dict['model.diffusion_model.input_blocks.0.0.weight'].shape[1]
-if in_channels == 9:
-print(' > SD-v1 inpainting model detected')
-model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inpainting-inference.yaml')
-elif in_channels == 4:
-print(' > SD-v1 model detected')
-model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
-else:
-print(f'** {thing} does not have an expected number of in_channels ({in_channels}). It will probably break when loaded.')
-model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
-except KeyError:
-print(f'** {thing} does not have the expected SD-v1 model fields. It will probably break when loaded.')
-model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
+else:
+print(f'** {thing} is a legacy checkpoint file of unknown format. Will treat as a regular v1.X model')
+model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
if convert:
diffuser_path = Path(Globals.root, 'models',Globals.converted_ckpts_dir, model_path.stem)
-self.convert_and_import(
+model_name = self.convert_and_import(
model_path,
diffusers_path=diffuser_path,
vae=dict(repo_id='stabilityai/sd-vae-ft-mse'),
model_name=model_name,
model_description=description,
original_config_file=model_config_file,
commit_to_conf=commit_to_conf,
)
else:
-self.import_ckpt_model(
+model_name = self.import_ckpt_model(
model_path,
config=model_config_file,
model_name=model_name,
model_description=description,
vae=str(Path(Globals.root,'models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt')),
commit_to_conf=commit_to_conf,
)
+return model_name
def convert_and_import(
self,
ckpt_path: Path,
diffusers_path: Path,
model_name=None,
model_description=None,
vae=None,
original_config_file: Path = None,
commit_to_conf: Path = None,
) -> dict:
"""
Convert a legacy ckpt weights file to diffuser model and import
into models.yaml.
"""
ckpt_path = self._resolve_path(ckpt_path, 'models/ldm/stable-diffusion-v1')
if original_config_file:
original_config_file = self._resolve_path(original_config_file, 'configs/stable-diffusion')
new_config = None
from ldm.invoke.ckpt_to_diffuser import convert_ckpt_to_diffuser
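Taken together, heuristic_import now reports what it did through its return value rather than a boolean. A hypothetical driver covering the input kinds the docstring lists, assuming an initialized ModelManager named `manager`; all paths and URLs are placeholders:

from pathlib import Path

candidates = [
    "stabilityai/stable-diffusion-2-1",       # HuggingFace repo_id
    "https://example.com/weights.ckpt",       # URL to a legacy checkpoint
    "/data/models/some-model.safetensors",    # local checkpoint file
    "/data/models/incoming",                  # directory to scan
]
for thing in candidates:
    if model_name := manager.heuristic_import(thing, commit_to_conf=Path("configs/models.yaml")):
        print(f">> registered {model_name}")
    else:
        print(f"** could not import {thing}")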
@@ -857,7 +912,7 @@ class ModelManager(object):
"** If you are trying to convert an inpainting or 2.X model, please indicate the correct config file (e.g. v1-inpainting-inference.yaml)"
)
-return new_config
+return model_name
def search_models(self, search_folder):
print(f">> Finding Models In: {search_folder}")