"""This module manages the InvokeAI `models.yaml` file, mapping
symbolic diffusers model names to the paths and repo_ids used
by the underlying `from_pretrained()` call.

For fetching models, use manager.get_model('symbolic name'). This will
return an SDModelInfo object that contains the following attributes:

   * context -- a context manager Generator that loads and locks the
                model into GPU VRAM and returns the model for use.
                See below for usage.
   * name -- symbolic name of the model
   * type -- SDModelType of the model
   * hash -- unique hash for the model
   * location -- path or repo_id of the model
   * revision -- revision of the model if coming from a repo id,
                e.g. 'fp16'
   * precision -- torch precision of the model
   * status -- a ModelStatus enum corresponding to one of
                'not_loaded', 'in_ram', 'in_vram' or 'active'

Typical usage:

   from invokeai.backend import ModelManager

   manager = ModelManager(
                 config='./configs/models.yaml',
                 max_cache_size=8
             )  # gigabytes

   model_info = manager.get_model('stable-diffusion-1.5', SDModelType.Diffusers)
   with model_info.context as my_model:
       my_model.latents_from_embeddings(...)

The manager uses the underlying ModelCache class to keep
frequently-used models in RAM and move them into GPU as needed for
generation operations. The optional `max_cache_size` argument
indicates the maximum size the cache can grow to, in gigabytes. The
underlying ModelCache object can be accessed using the manager's "cache"
attribute.

Because the model manager can return multiple different types of
models, you may wish to add additional type checking on the class
of model returned. To do this, provide the optional `model_type`
parameter:

    model_info = manager.get_model(
                      'clip-tokenizer',
                      model_type=SDModelType.Tokenizer
                 )

This will raise an InvalidModelError if the format defined in the
config file doesn't match the requested model type.

MODELS.YAML

The general format of a models.yaml section is:

 type-of-model/name-of-model:
     format: folder|ckpt|safetensors
     repo_id: owner/repo
     path: /path/to/local/file/or/directory
     subfolder: subfolder-name

The type of model is given in the stanza key, and is one of
{diffusers, ckpt, vae, text_encoder, tokenizer, unet, scheduler,
safety_checker, feature_extractor, lora, textual_inversion}. These
correspond to items in the SDModelType enum defined in model_cache.py.

The format indicates whether the model is organized as a folder with
model subdirectories, or is contained in a single checkpoint or
safetensors file.

One, but not both, of repo_id and path is provided. repo_id is the
HuggingFace repository ID of the model, and path points to the file or
directory on disk.

If subfolder is provided, then the model exists in a subdirectory of
the main model. These are usually named after the model type, such as
"unet".

This example summarizes the two ways of getting a non-diffusers model:

 text_encoder/clip-test-1:
   format: folder
   repo_id: openai/clip-vit-large-patch14
   description: Returns standalone CLIPTextModel

 text_encoder/clip-test-2:
   format: folder
   repo_id: stabilityai/stable-diffusion-2
   subfolder: text_encoder
   description: Returns the text_encoder in the subfolder of the diffusers model (just the encoder in RAM)

SUBMODELS:

It is also possible to fetch an isolated submodel from a diffusers
model. Use the `submodel` parameter to select which part:

 vae = manager.get_model('stable-diffusion-1.5', submodel=SDModelType.Vae)
 with vae.context as my_vae:
    print(type(my_vae))
    # "AutoencoderKL"

DISAMBIGUATION:

You may wish to use the same name for a related family of models. To
do this, disambiguate the stanza key with the model type and name
separated by "/". Example:

 tokenizer/clip-large:
   format: tokenizer
   repo_id: openai/clip-vit-large-patch14
   description: Returns standalone tokenizer

 text_encoder/clip-large:
   format: text_encoder
   repo_id: openai/clip-vit-large-patch14
   description: Returns standalone text encoder

You can now use the `model_type` argument to indicate which model you
want:

 tokenizer = mgr.get_model('clip-large', model_type=SDModelType.Tokenizer)
 encoder = mgr.get_model('clip-large', model_type=SDModelType.TextEncoder)

OTHER FUNCTIONS:

Other methods provided by ModelManager support importing, editing,
converting and deleting models.
"""

from __future__ import annotations

import os
import re
import sys
import textwrap
import types
from contextlib import suppress
from dataclasses import dataclass
from enum import Enum, auto
from packaging import version
from pathlib import Path
from shutil import rmtree
from typing import Callable, Optional, List, Tuple, Union

import safetensors
import safetensors.torch
import torch
from diffusers import AutoencoderKL
from huggingface_hub import scan_cache_dir
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig

import invokeai.backend.util.logging as logger
from invokeai.backend.globals import (Globals, global_cache_dir,
                                      global_resolve_path)
from invokeai.backend.util import download_with_resume

from ..util import CUDA_DEVICE
from .model_cache import (ModelCache, ModelLocker, ModelStatus, SDModelType,
                          SilenceWarnings, DIFFUSERS_PARTS)

# We are only starting to number the config file with release 3.
# The config file version doesn't have to start at the release version,
# but it will help reduce confusion.
CONFIG_FILE_VERSION = '3.0.0'
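
# A minimal sketch of the version comparison that _update_config_file_version()
# can perform (hypothetical helper body; the real method also rewrites the
# file on disk), using the `packaging.version` import above:
#
#   if version.parse(str(conf.get('config_file_version', '0.0.0'))) \
#           < version.parse(CONFIG_FILE_VERSION):
#       conf['config_file_version'] = CONFIG_FILE_VERSION
#       # ...migrate stanzas and save...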

# wanted to use pydantic here, but Generator objects not supported
@dataclass
class SDModelInfo():
    context: ModelLocker
    name: str
    type: SDModelType
    hash: str
    location: Union[Path, str]
    precision: torch.dtype
    subfolder: Path = None
    revision: str = None
    _cache: ModelCache = None

    def __enter__(self):
        return self.context.__enter__()

    def __exit__(self, *args, **kwargs):
        self.context.__exit__(*args, **kwargs)

    @property
    def status(self) -> ModelStatus:
        '''Return load status of this model as a model_cache.ModelStatus enum'''
        if not self._cache:
            return ModelStatus.unknown
        return self._cache.status(
            self.location,
            revision=self.revision,
            subfolder=self.subfolder,
        )
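
# Usage sketch (illustrative, assuming a configured ModelManager named
# `manager`): because SDModelInfo forwards __enter__/__exit__ to its
# ModelLocker, the info object itself works as a context manager,
# equivalent to `with info.context as model:`.
#
#   info = manager.get_model('stable-diffusion-1.5')
#   with info as model:
#       result = model.latents_from_embeddings(...)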

class InvalidModelError(Exception):
    "Raised when an invalid model is requested"
    pass


class SDLegacyType(Enum):
    V1 = auto()
    V1_INPAINT = auto()
    V2 = auto()
    V2_e = auto()
    V2_v = auto()
    UNKNOWN = auto()


MAX_CACHE_SIZE = 6.0  # GB

class ModelManager(object):
    """
    High-level interface to model management.
    """

    logger: types.ModuleType = logger

    def __init__(
        self,
        config: Union[Path, DictConfig, str],
        device_type: torch.device = CUDA_DEVICE,
        precision: torch.dtype = torch.float16,
        max_cache_size=MAX_CACHE_SIZE,
        sequential_offload=False,
        logger: types.ModuleType = logger,
    ):
        """
        Initialize with the path to the models.yaml config file.
        Optional parameters are the torch device type, precision, max_cache_size,
        and the sequential_offload boolean. Note that the default device
        type and precision are set up for a CUDA system running at half precision.
        """
        if isinstance(config, DictConfig):
            self.config_path = None
            self.config = config
        elif isinstance(config, (str, Path)):
            self.config_path = config
            self.config = OmegaConf.load(self.config_path)
        else:
            raise ValueError('config argument must be an OmegaConf object, a Path or a string')

        # check config version number and update on disk/RAM if necessary
        self._update_config_file_version()

        self.cache = ModelCache(
            max_cache_size=max_cache_size,
            execution_device=device_type,
            precision=precision,
            sequential_offload=sequential_offload,
            logger=logger,
        )
        self.cache_keys = dict()
        self.logger = logger

    def model_exists(
        self,
        model_name: str,
        model_type: SDModelType = SDModelType.Diffusers,
    ) -> bool:
        """
        Given a model name, returns True if it is a valid
        identifier.
        """
        model_key = self.create_key(model_name, model_type)
        return model_key in self.config

    def create_key(self, model_name: str, model_type: SDModelType) -> str:
        return f"{model_type}/{model_name}"

    def parse_key(self, model_key: str) -> Tuple[str, SDModelType]:
        model_type_str, model_name = model_key.split('/', 1)
        try:
            model_type = SDModelType(model_type_str)
            return (model_name, model_type)
        except ValueError:
            raise InvalidModelError(f"Unknown model type: {model_type_str}")
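
    # Round-trip sketch (illustrative only): keys are "<type>/<name>" strings,
    # so create_key() and parse_key() are inverses of each other:
    #
    #   key = mgr.create_key('clip-large', SDModelType.Tokenizer)  # "tokenizer/clip-large"
    #   name, model_type = mgr.parse_key(key)  # ('clip-large', SDModelType.Tokenizer)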

    def get_model(
        self,
        model_name: str,
        model_type: SDModelType = SDModelType.Diffusers,
        submodel: Optional[SDModelType] = None,
    ) -> SDModelInfo:
        """Given a model name identified in models.yaml, return
        an SDModelInfo object describing it.
        :param model_name: symbolic name of the model in models.yaml
        :param model_type: SDModelType enum indicating the type of model to return
        :param submodel: an SDModelType enum indicating the portion of
               the model to retrieve (e.g. SDModelType.Vae)

        If not provided, the model_type will be read from the `format` field
        of the corresponding stanza. If provided, the model_type will be used
        to disambiguate stanzas in the configuration file. The default is to
        assume a diffusers pipeline. The behavior is illustrated here:

        [models.yaml]
        diffusers/test1:
           repo_id: foo/bar
           description: Typical diffusers pipeline

        lora/test1:
           repo_id: /tmp/loras/test1.safetensors
           description: Typical lora file

        test1_pipeline = mgr.get_model('test1')
        # returns a StableDiffusionGeneratorPipeline

        test1_vae1 = mgr.get_model('test1', submodel=SDModelType.Vae)
        # returns the VAE part of a diffusers model as an AutoencoderKL

        test1_vae2 = mgr.get_model('test1', model_type=SDModelType.Diffusers, submodel=SDModelType.Vae)
        # does the same thing as the previous statement. Note that model_type
        # is for the parent model, and submodel is for the part

        test1_lora = mgr.get_model('test1', model_type=SDModelType.Lora)
        # returns a LoRA embed (as a 'dict' of tensors)

        test1_encoder = mgr.get_model('test1', model_type=SDModelType.TextEncoder)
        # raises an InvalidModelError
        """
        # This is a temporary workaround for callers that use "type/name" as the model name
        # because they haven't adjusted to the new return format of `list_models()`
        if "/" in model_name:
            model_key = model_name
        else:
            model_key = self.create_key(model_name, model_type)

        # TODO: delete default model or add a check that this is a stable diffusion model
        # if not model_name:
        #     model_name = self.default_model()

        if model_key not in self.config:
            raise InvalidModelError(
                f'"{model_key}" is not a known model name. Please check your models.yaml file'
            )

        # get the required loading info out of the config file
        mconfig = self.config[model_key]

        # type already checked as it's part of key
        if model_type == SDModelType.Diffusers:
            # intercept stanzas that point to checkpoint weights and replace them
            # with the equivalent diffusers model
            if mconfig.format in ["ckpt", "safetensors"]:
                location = self.convert_ckpt_and_cache(mconfig)
            else:
                location = global_resolve_path(mconfig.get('path')) or mconfig.get('repo_id')
        else:
            location = global_resolve_path(mconfig.get('path')) \
                or mconfig.get('repo_id') \
                or global_resolve_path(mconfig.get('weights'))

        subfolder = mconfig.get('subfolder')
        revision = mconfig.get('revision')
        hash = self.cache.model_hash(location, revision)

        # If the caller is asking for part of the model and the config indicates
        # an external replacement for that field, then we fetch the replacement
        if submodel and mconfig.get(submodel):
            location = mconfig.get(submodel).get('path') \
                or mconfig.get(submodel).get('repo_id')
            model_type = submodel
            submodel = None

        # We don't need to load the whole model if the user is asking for just a piece of it
        elif model_type == SDModelType.Diffusers and submodel and not subfolder:
            model_type = submodel
            subfolder = submodel.value
            submodel = None

        # to support the traditional way of attaching a VAE
        # to a model, we hacked in `attach_model_part`
        # TODO: generalize this
        external_parts = set()
        if model_type == SDModelType.Diffusers:
            for part in DIFFUSERS_PARTS:
                with suppress(Exception):
                    if part_config := mconfig.get(part):
                        # use part-local names so we don't clobber the model's
                        # own `subfolder` computed above
                        part_id = part_config.get('path') or part_config.get('repo_id')
                        part_subfolder = part_config.get('subfolder')
                        external_parts.add((part, part_id, part_subfolder))

        model_context = self.cache.get_model(
            location,
            model_type=model_type,
            revision=revision,
            subfolder=subfolder,
            submodel=submodel,
            attach_model_parts=external_parts,
        )

        # in case we need to communicate information about this
        # model to the cache manager, then we need to remember
        # the cache key
        self.cache_keys[model_key] = model_context.key

        return SDModelInfo(
            context=model_context,
            name=model_name,
            type=submodel or model_type,
            hash=hash,
            location=location,
            revision=revision,
            precision=self.cache.precision,
            subfolder=subfolder,
            _cache=self.cache,
        )
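
    # Key-format sketch (illustrative): because of the "type/name" workaround
    # at the top of get_model(), both of these calls resolve to the same stanza:
    #
    #   mgr.get_model('stable-diffusion-1.5', SDModelType.Diffusers)
    #   mgr.get_model('diffusers/stable-diffusion-1.5')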

    def default_model(self) -> Optional[Tuple[str, SDModelType]]:
        """
        Returns the (name, SDModelType) of the default model, or None
        if none is defined.
        """
        for model_name, model_type in self.model_names():
            model_key = self.create_key(model_name, model_type)
            if self.config[model_key].get("default"):
                return (model_name, model_type)
        return self.model_names()[0]

    def set_default_model(self, model_name: str, model_type: SDModelType = SDModelType.Diffusers) -> None:
        """
        Set the default model. The change will not take
        effect until you call model_manager.commit().
        """
        assert self.model_exists(model_name, model_type), f"unknown model '{model_name}'"

        config = self.config
        # use distinct loop variables so we don't clobber the arguments
        for name, mtype in self.model_names():
            key = self.create_key(name, mtype)
            config[key].pop("default", None)
        config[self.create_key(model_name, model_type)]["default"] = True
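
    # Usage sketch (illustrative): make a model the default, then persist the
    # change with commit() as the docstring above notes:
    #
    #   mgr.set_default_model('stable-diffusion-1.5', SDModelType.Diffusers)
    #   mgr.commit('./configs/models.yaml')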

    def model_info(
        self,
        model_name: str,
        model_type: SDModelType = SDModelType.Diffusers,
    ) -> dict:
        """
        Given a model name, returns the OmegaConf (dict-like) object describing it.
        """
        if not self.model_exists(model_name, model_type):
            return None
        return self.config[self.create_key(model_name, model_type)]

    def model_names(self) -> List[Tuple[str, SDModelType]]:
        """
        Return a list of (str, SDModelType) tuples corresponding to all models
        known to the configuration.
        """
        return [self.parse_key(x) for x in self.config.keys() if isinstance(self.config[x], DictConfig)]

    def is_legacy(self, model_name: str, model_type: SDModelType = SDModelType.Diffusers) -> bool:
        """
        Return true if this is a legacy (.ckpt) model.
        """
        # if we are converting legacy files automatically, then
        # there are no legacy ckpts!
        if Globals.ckpt_convert:
            return False
        info = self.model_info(model_name, model_type)
        if "weights" in info and info["weights"].endswith((".ckpt", ".safetensors")):
            return True
        return False

    def list_models(self) -> dict:
        """
        Return a dict of models.
        Please use model_manager.model_names() to get all the model names,
        model_manager.model_info('model-name') to get the stanza for the model
        named 'model-name', and model_manager.config to get the full OmegaConf
        object derived from models.yaml.
        """
        models = {}
        for model_key in sorted(self.config, key=str.casefold):
            stanza = self.config[model_key]

            # don't include VAEs in listing (legacy style)
            if "config" in stanza and "/VAE/" in stanza["config"]:
                continue
            if model_key == 'config_file_version':
                continue

            model_name, model_type = self.parse_key(model_key)
            models[model_key] = dict()

            # TODO: return all models in future
            if model_type != SDModelType.Diffusers:
                continue

            model_format = stanza.get('format')

            # Common Attribs
            status = self.cache.status(
                stanza.get('weights') or stanza.get('repo_id'),
                revision=stanza.get('revision'),
                subfolder=stanza.get('subfolder'),
            )
            description = stanza.get("description", None)
            models[model_key].update(
                model_name=model_name,
                model_type=model_type,
                format=model_format,
                description=description,
                status=status.value,
            )

            # Checkpoint Config Parse
            if model_format in ["ckpt", "safetensors"]:
                models[model_key].update(
                    config=str(stanza.get("config", None)),
                    weights=str(stanza.get("weights", None)),
                    vae=str(stanza.get("vae", None)),
                )

            # Diffusers Config Parse
            elif model_format == "folder":
                if vae := stanza.get("vae", None):
                    if isinstance(vae, DictConfig):
                        vae = dict(
                            repo_id=str(vae.get("repo_id", None)),
                            path=str(vae.get("path", None)),
                            subfolder=str(vae.get("subfolder", None)),
                        )

                models[model_key].update(
                    vae=vae,
                    repo_id=str(stanza.get("repo_id", None)),
                    path=str(stanza.get("path", None)),
                )

        return models

    def print_models(self) -> None:
        """
        Print a table of models, their descriptions, and load status.
        """
        for model_key, model_info in self.list_models().items():
            line = f'{model_info["model_name"]:25s} {model_info["status"]:>15s} {model_info["model_type"]:10s} {model_info["description"]}'
            if model_info["status"] in ["in gpu", "locked in gpu"]:
                line = f"\033[1m{line}\033[0m"
            print(line)

    def del_model(
        self,
        model_name: str,
        model_type: SDModelType = SDModelType.Diffusers,
        delete_files: bool = False,
    ):
        """
        Delete the named model.
        """
        model_key = self.create_key(model_name, model_type)
        model_cfg = self.config.pop(model_key, None)

        if model_cfg is None:
            self.logger.error(
                f"Unknown model {model_key}"
            )
            return

        # TODO: some legacy?
        # if model_name in self.stack:
        #     self.stack.remove(model_name)

        if delete_files:
            repo_id = model_cfg.get("repo_id", None)
            path = self._abs_path(model_cfg.get("path", None))
            weights = self._abs_path(model_cfg.get("weights", None))
            if "weights" in model_cfg:
                weights = self._abs_path(model_cfg["weights"])
                self.logger.info(f"Deleting file {weights}")
                Path(weights).unlink(missing_ok=True)

            elif "path" in model_cfg:
                path = self._abs_path(model_cfg["path"])
                self.logger.info(f"Deleting directory {path}")
                rmtree(path, ignore_errors=True)

            elif "repo_id" in model_cfg:
                repo_id = model_cfg["repo_id"]
                self.logger.info(f"Deleting the cached model directory for {repo_id}")
                self._delete_model_from_cache(repo_id)

    def add_model(
        self,
        model_name: str,
        model_type: SDModelType,
        model_attributes: dict,
        clobber: bool = False,
    ) -> None:
        """
        Update the named model with a dictionary of attributes. Will fail with an
        assertion error if the name already exists. Pass clobber=True to overwrite.
        On a successful update, the config will be changed in memory and the
        method will return True. Will fail with an assertion error if provided
        attributes are incorrect or the model name is missing.
        """
        if model_type == SDModelType.Diffusers:
            # TODO: automatically or manually?
            # assert "format" in model_attributes, 'missing required field "format"'
            model_format = "ckpt" if "weights" in model_attributes else "diffusers"

            if model_format == "diffusers":
                assert (
                    "description" in model_attributes
                ), 'required field "description" is missing'
                assert (
                    "path" in model_attributes or "repo_id" in model_attributes
                ), 'model must have either the "path" or "repo_id" fields defined'

            elif model_format == "ckpt":
                for field in ("description", "weights", "config"):
                    assert field in model_attributes, f"required field {field} is missing"

        else:
            assert "weights" in model_attributes and "description" in model_attributes

        model_key = self.create_key(model_name, model_type)

        assert (
            clobber or model_key not in self.config
        ), f'attempt to overwrite existing model definition "{model_key}"'

        self.config[model_key] = model_attributes

        if "weights" in self.config[model_key]:
            self.config[model_key]["weights"] = self.config[model_key]["weights"].replace("\\", "/")

        if clobber and model_key in self.cache_keys:
            self.cache.uncache_model(self.cache_keys[model_key])
            del self.cache_keys[model_key]
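
    # Usage sketch (hypothetical names): register a diffusers stanza in memory,
    # then write it out with commit():
    #
    #   mgr.add_model(
    #       'my-model',
    #       SDModelType.Diffusers,
    #       dict(format='diffusers', repo_id='owner/repo', description='example'),
    #   )
    #   mgr.commit('./configs/models.yaml')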

    def import_diffuser_model(
        self,
        repo_or_path: Union[str, Path],
        model_name: str = None,
        description: str = None,
        vae: dict = None,
        commit_to_conf: Path = None,
    ) -> str:
        """
        Attempts to install the indicated diffuser model and returns the new
        model key if successful.

        "repo_or_path" can be either a repo-id or a path-like object corresponding to the
        top of a downloaded diffusers directory.

        You can optionally provide a model name and/or description. If not provided,
        then these will be derived from the repo name. If you provide a commit_to_conf
        path to the configuration file, then the new entry will be committed to the
        models.yaml file.
        """
        model_name = model_name or Path(repo_or_path).stem
        model_description = description or f"Imported diffusers model {model_name}"
        new_config = dict(
            description=model_description,
            vae=vae,
            format="diffusers",
        )
        if isinstance(repo_or_path, Path) and repo_or_path.exists():
            new_config.update(path=str(repo_or_path))
        else:
            new_config.update(repo_id=repo_or_path)

        self.add_model(model_name, SDModelType.Diffusers, new_config, True)
        if commit_to_conf:
            self.commit(commit_to_conf)
        return self.create_key(model_name, SDModelType.Diffusers)

    def import_lora(
        self,
        path: Path,
        model_name: Optional[str] = None,
        description: Optional[str] = None,
    ):
        """
        Creates an entry for the indicated LoRA file. Call
        mgr.commit() to write out the configuration to models.yaml.
        """
        path = Path(path)
        model_name = model_name or path.stem
        model_description = description or f"LoRA model {model_name}"
        self.add_model(
            model_name,
            SDModelType.Lora,
            dict(
                format="lora",
                weights=str(path),
                description=model_description,
            ),
            True
        )

    def import_embedding(
        self,
        path: Path,
        model_name: Optional[str] = None,
        description: Optional[str] = None,
    ):
        """
        Creates an entry for the indicated textual inversion embedding file.
        Call mgr.commit() to write out the configuration to models.yaml.
        """
        path = Path(path)
        if path.is_dir() and (path / "learned_embeds.bin").exists():
            weights = path / "learned_embeds.bin"
        else:
            weights = path

        model_name = model_name or path.stem
        model_description = description or f"Textual embedding model {model_name}"
        self.add_model(
            model_name,
            SDModelType.TextualInversion,
            dict(
                format="textual_inversion",
                weights=str(weights),
                description=model_description,
            ),
            True
        )

    @classmethod
    def probe_model_type(cls, checkpoint: dict) -> SDLegacyType:
        """
        Given a pickle or safetensors model object, probes contents
        of the object and returns an SDLegacyType indicating its
        format. Valid return values include:
            SDLegacyType.V1
            SDLegacyType.V1_INPAINT
            SDLegacyType.V2     (V2 prediction type unknown)
            SDLegacyType.V2_e   (V2 using 'epsilon' prediction type)
            SDLegacyType.V2_v   (V2 using 'v_prediction' prediction type)
            SDLegacyType.UNKNOWN
        """
        global_step = checkpoint.get("global_step")
        state_dict = checkpoint.get("state_dict") or checkpoint

        try:
            key_name = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
            if key_name in state_dict and state_dict[key_name].shape[-1] == 1024:
                if global_step == 220000:
                    return SDLegacyType.V2_e
                elif global_step == 110000:
                    return SDLegacyType.V2_v
                else:
                    return SDLegacyType.V2
            # otherwise we assume a V1 file
            in_channels = state_dict[
                "model.diffusion_model.input_blocks.0.0.weight"
            ].shape[1]
            if in_channels == 9:
                return SDLegacyType.V1_INPAINT
            elif in_channels == 4:
                return SDLegacyType.V1
            else:
                return SDLegacyType.UNKNOWN
        except KeyError:
            return SDLegacyType.UNKNOWN
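
    # Probe sketch (hypothetical path): load a checkpoint and branch on the
    # detected legacy type:
    #
    #   ckpt = safetensors.torch.load_file('/path/to/model.safetensors')
    #   kind = ModelManager.probe_model_type(ckpt)
    #   if kind in (SDLegacyType.V2, SDLegacyType.V2_e, SDLegacyType.V2_v):
    #       ...select a v2 inference config...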

    def heuristic_import(
        self,
        path_url_or_repo: str,
        model_name: Optional[str] = None,
        description: Optional[str] = None,
        model_config_file: Optional[Path] = None,
        commit_to_conf: Optional[Path] = None,
        config_file_callback: Optional[Callable[[Path], Path]] = None,
    ) -> str:
"""Accept a string which could be:
|
2023-02-28 05:31:15 +00:00
|
|
|
- a HF diffusers repo_id
|
|
|
|
- a URL pointing to a legacy .ckpt or .safetensors file
|
|
|
|
- a local path pointing to a legacy .ckpt or .safetensors file
|
|
|
|
- a local directory containing .ckpt and .safetensors files
|
|
|
|
- a local directory containing a diffusers model
|
|
|
|
|
|
|
|
After determining the nature of the model and downloading it
|
|
|
|
(if necessary), the file is probed to determine the correct
|
|
|
|
configuration file (if needed) and it is imported.
|
|
|
|
|
|
|
|
The model_name and/or description can be provided. If not, they will
|
|
|
|
be generated automatically.
|
|
|
|
|
|
|
|
If commit_to_conf is provided, the newly loaded model will be written
|
|
|
|
to the `models.yaml` file at the indicated path. Otherwise, the changes
|
|
|
|
will only remain in memory.
|
|
|
|
|

        The routine will do its best to figure out the config file
        needed to convert a legacy checkpoint file, but if it can't it
        will call the config_file_callback routine, if provided. The
        callback accepts a single argument, the Path to the checkpoint
        file, and returns a Path to the config file to use.

        The (potentially derived) name of the model is returned on
        success, or None on failure. When multiple models are added
        from a directory, only the last imported one is returned.
        """
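        # Usage sketch (hypothetical inputs): all of the following forms are
        # accepted by this method, per the docstring above:
        #
        #   mgr.heuristic_import('runwayml/stable-diffusion-v1-5')
        #   mgr.heuristic_import('/downloads/my-model.safetensors',
        #                        commit_to_conf=Path('./configs/models.yaml'))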

        model_path: Path = None
        thing = path_url_or_repo  # to save typing

        self.logger.info(f"Probing {thing} for import")

        if thing.startswith(("http:", "https:", "ftp:")):
            self.logger.info(f"{thing} appears to be a URL")
            model_path = self._resolve_path(
                thing, "models/ldm/stable-diffusion-v1"
            )  # _resolve_path does a download if needed

        elif Path(thing).is_file() and thing.endswith((".ckpt", ".safetensors")):
            if Path(thing).stem in ["model", "diffusion_pytorch_model"]:
                self.logger.debug(f"{Path(thing).name} appears to be part of a diffusers model. Skipping import")
                return
            else:
                self.logger.debug(f"{thing} appears to be a checkpoint file on disk")
                model_path = self._resolve_path(thing, "models/ldm/stable-diffusion-v1")

        elif Path(thing).is_dir() and Path(thing, "model_index.json").exists():
            self.logger.debug(f"{thing} appears to be a diffusers file on disk")
            model_name = self.import_diffuser_model(
                thing,
                vae=dict(repo_id="stabilityai/sd-vae-ft-mse"),
                model_name=model_name,
                description=description,
                commit_to_conf=commit_to_conf,
            )

        elif Path(thing).is_dir():
            if (Path(thing) / "model_index.json").exists():
                self.logger.debug(f"{thing} appears to be a diffusers model.")
                model_name = self.import_diffuser_model(
                    thing, commit_to_conf=commit_to_conf
                )
            else:
                self.logger.debug(f"{thing} appears to be a directory. Will scan for models to import")
                for m in list(Path(thing).rglob("*.ckpt")) + list(
                    Path(thing).rglob("*.safetensors")
                ):
                    if model_name := self.heuristic_import(
                        str(m), commit_to_conf=commit_to_conf
                    ):
                        self.logger.info(f"{model_name} successfully imported")
                return model_name
        elif re.match(r"^[\w.+-]+/[\w.+-]+$", thing):
            self.logger.debug(f"{thing} appears to be a HuggingFace diffusers repo_id")
            model_name = self.import_diffuser_model(
                thing, commit_to_conf=commit_to_conf
            )
            pipeline, _, _, _ = self._load_diffusers_model(self.config[model_name])
            return model_name
        else:
            self.logger.warning(f"{thing}: Unknown thing. Please provide a URL, file path, directory or HuggingFace repo_id")

        # model_path is set in the event of a legacy checkpoint file.
        # If not set, we're all done
        if not model_path:
            return

        if model_path.stem in self.config:  # already imported
            self.logger.debug("Already imported. Skipping")
            return model_path.stem

        # another round of heuristics to guess the correct config file.
        checkpoint = None
        if model_path.suffix in [".ckpt", ".pt"]:
            self.cache.scan_model(model_path, model_path)
            checkpoint = torch.load(model_path)
        else:
            checkpoint = safetensors.torch.load_file(model_path)

        # additional probing needed if no config file provided
        if model_config_file is None:
            # look for a like-named .yaml file in same directory
            if model_path.with_suffix(".yaml").exists():
                model_config_file = model_path.with_suffix(".yaml")
                self.logger.debug(f"Using config file {model_config_file.name}")
            else:
model_type = self.probe_model_type(checkpoint)
|
|
|
|
if model_type == SDLegacyType.V1:
|
2023-04-29 14:48:50 +00:00
|
|
|
self.logger.debug("SD-v1 model detected")
|
improve importation and conversion of legacy checkpoint files
A long-standing issue with importing legacy checkpoints (both ckpt and
safetensors) is that the user has to identify the correct config file,
either by providing its path or by selecting which type of model the
checkpoint is (e.g. "v1 inpainting"). In addition, some users wish to
provide custom VAEs for use with the model. Currently this is done in
the WebUI by importing the model, editing it, and then typing in the
path to the VAE.
To improve the user experience, the model manager's
`heuristic_import()` method has been enhanced as follows:
1. When initially called, the caller can pass a config file path, in
which case it will be used.
2. If no config file provided, the method looks for a .yaml file in the
same directory as the model which bears the same basename. e.g.
```
my-new-model.safetensors
my-new-model.yaml
```
The yaml file is then used as the configuration file for
importation and conversion.
3. If no such file is found, then the method opens up the checkpoint
and probes it to determine whether it is V1, V1-inpaint or V2.
If it is a V1 format, then the appropriate v1-inference.yaml config
file is used. Unfortunately there are two V2 variants that cannot be
distinguished by introspection.
4. If the probe algorithm is unable to determine the model type, then its
last-ditch effort is to execute an optional callback function that can
be provided by the caller. This callback, named `config_file_callback`
receives the path to the legacy checkpoint and returns the path to the
config file to use. The CLI uses to put up a multiple choice prompt to
the user. The WebUI **could** use this to prompt the user to choose
from a radio-button selection.
5. If the config file cannot be determined, then the import is abandoned.
The user can attach a custom VAE to the imported and converted model
by copying the desired VAE into the same directory as the file to be
imported, and giving it the same basename. E.g.:
```
my-new-model.safetensors
my-new-model.vae.pt
```
For this to work, the VAE must end with ".vae.pt", ".vae.ckpt", or
".vae.safetensors". The indicated VAE will be converted into diffusers
format and stored with the converted models file, so the ".pt" file
can be deleted after conversion.
No facility is currently provided to swap a diffusers VAE at import
time, but this can be done after the fact using the WebUI and CLI's
model editing functions.
2023-03-27 15:27:45 +00:00
|
|
|
model_config_file = Path(
|
|
|
|
Globals.root, "configs/stable-diffusion/v1-inference.yaml"
|
|
|
|
)
|
|
|
|
elif model_type == SDLegacyType.V1_INPAINT:
|
2023-04-29 14:48:50 +00:00
|
|
|
self.logger.debug("SD-v1 inpainting model detected")
|
improve importation and conversion of legacy checkpoint files
A long-standing issue with importing legacy checkpoints (both ckpt and
safetensors) is that the user has to identify the correct config file,
either by providing its path or by selecting which type of model the
checkpoint is (e.g. "v1 inpainting"). In addition, some users wish to
provide custom VAEs for use with the model. Currently this is done in
the WebUI by importing the model, editing it, and then typing in the
path to the VAE.
To improve the user experience, the model manager's
`heuristic_import()` method has been enhanced as follows:
1. When initially called, the caller can pass a config file path, in
which case it will be used.
2. If no config file provided, the method looks for a .yaml file in the
same directory as the model which bears the same basename. e.g.
```
my-new-model.safetensors
my-new-model.yaml
```
The yaml file is then used as the configuration file for
importation and conversion.
3. If no such file is found, then the method opens up the checkpoint
and probes it to determine whether it is V1, V1-inpaint or V2.
If it is a V1 format, then the appropriate v1-inference.yaml config
file is used. Unfortunately there are two V2 variants that cannot be
distinguished by introspection.
4. If the probe algorithm is unable to determine the model type, then its
last-ditch effort is to execute an optional callback function that can
be provided by the caller. This callback, named `config_file_callback`
receives the path to the legacy checkpoint and returns the path to the
config file to use. The CLI uses to put up a multiple choice prompt to
the user. The WebUI **could** use this to prompt the user to choose
from a radio-button selection.
5. If the config file cannot be determined, then the import is abandoned.
The user can attach a custom VAE to the imported and converted model
by copying the desired VAE into the same directory as the file to be
imported, and giving it the same basename. E.g.:
```
my-new-model.safetensors
my-new-model.vae.pt
```
For this to work, the VAE must end with ".vae.pt", ".vae.ckpt", or
".vae.safetensors". The indicated VAE will be converted into diffusers
format and stored with the converted models file, so the ".pt" file
can be deleted after conversion.
No facility is currently provided to swap a diffusers VAE at import
time, but this can be done after the fact using the WebUI and CLI's
model editing functions.
2023-03-27 15:27:45 +00:00
|
|
|
model_config_file = Path(
|
2023-04-05 21:25:42 +00:00
|
|
|
Globals.root,
|
|
|
|
"configs/stable-diffusion/v1-inpainting-inference.yaml",
|
improve importation and conversion of legacy checkpoint files
A long-standing issue with importing legacy checkpoints (both ckpt and
safetensors) is that the user has to identify the correct config file,
either by providing its path or by selecting which type of model the
checkpoint is (e.g. "v1 inpainting"). In addition, some users wish to
provide custom VAEs for use with the model. Currently this is done in
the WebUI by importing the model, editing it, and then typing in the
path to the VAE.
To improve the user experience, the model manager's
`heuristic_import()` method has been enhanced as follows:
1. When initially called, the caller can pass a config file path, in
which case it will be used.
2. If no config file provided, the method looks for a .yaml file in the
same directory as the model which bears the same basename. e.g.
```
my-new-model.safetensors
my-new-model.yaml
```
The yaml file is then used as the configuration file for
importation and conversion.
3. If no such file is found, then the method opens up the checkpoint
and probes it to determine whether it is V1, V1-inpaint or V2.
If it is a V1 format, then the appropriate v1-inference.yaml config
file is used. Unfortunately there are two V2 variants that cannot be
distinguished by introspection.
4. If the probe algorithm is unable to determine the model type, then its
last-ditch effort is to execute an optional callback function that can
be provided by the caller. This callback, named `config_file_callback`
receives the path to the legacy checkpoint and returns the path to the
config file to use. The CLI uses to put up a multiple choice prompt to
the user. The WebUI **could** use this to prompt the user to choose
from a radio-button selection.
5. If the config file cannot be determined, then the import is abandoned.
The user can attach a custom VAE to the imported and converted model
by copying the desired VAE into the same directory as the file to be
imported, and giving it the same basename. E.g.:
```
my-new-model.safetensors
my-new-model.vae.pt
```
For this to work, the VAE must end with ".vae.pt", ".vae.ckpt", or
".vae.safetensors". The indicated VAE will be converted into diffusers
format and stored with the converted models file, so the ".pt" file
can be deleted after conversion.
No facility is currently provided to swap a diffusers VAE at import
time, but this can be done after the fact using the WebUI and CLI's
model editing functions.
2023-03-27 15:27:45 +00:00
|
|
|
)
|
|
|
|
elif model_type == SDLegacyType.V2_v:
|
2023-04-29 14:48:50 +00:00
|
|
|
self.logger.debug("SD-v2-v model detected")
|
improve importation and conversion of legacy checkpoint files
A long-standing issue with importing legacy checkpoints (both ckpt and
safetensors) is that the user has to identify the correct config file,
either by providing its path or by selecting which type of model the
checkpoint is (e.g. "v1 inpainting"). In addition, some users wish to
provide custom VAEs for use with the model. Currently this is done in
the WebUI by importing the model, editing it, and then typing in the
path to the VAE.
To improve the user experience, the model manager's
`heuristic_import()` method has been enhanced as follows:
1. When initially called, the caller can pass a config file path, in
which case it will be used.
2. If no config file provided, the method looks for a .yaml file in the
same directory as the model which bears the same basename. e.g.
```
my-new-model.safetensors
my-new-model.yaml
```
The yaml file is then used as the configuration file for
importation and conversion.
3. If no such file is found, then the method opens up the checkpoint
and probes it to determine whether it is V1, V1-inpaint or V2.
If it is a V1 format, then the appropriate v1-inference.yaml config
file is used. Unfortunately there are two V2 variants that cannot be
distinguished by introspection.
4. If the probe algorithm is unable to determine the model type, then its
last-ditch effort is to execute an optional callback function that can
be provided by the caller. This callback, named `config_file_callback`
receives the path to the legacy checkpoint and returns the path to the
config file to use. The CLI uses to put up a multiple choice prompt to
the user. The WebUI **could** use this to prompt the user to choose
from a radio-button selection.
5. If the config file cannot be determined, then the import is abandoned.
The user can attach a custom VAE to the imported and converted model
by copying the desired VAE into the same directory as the file to be
imported, and giving it the same basename. E.g.:
```
my-new-model.safetensors
my-new-model.vae.pt
```
For this to work, the VAE must end with ".vae.pt", ".vae.ckpt", or
".vae.safetensors". The indicated VAE will be converted into diffusers
format and stored with the converted models file, so the ".pt" file
can be deleted after conversion.
No facility is currently provided to swap a diffusers VAE at import
time, but this can be done after the fact using the WebUI and CLI's
model editing functions.
2023-03-27 15:27:45 +00:00
|
|
|
model_config_file = Path(
|
|
|
|
Globals.root, "configs/stable-diffusion/v2-inference-v.yaml"
|
|
|
|
)
|
|
|
|
elif model_type == SDLegacyType.V2_e:
|
2023-04-29 14:48:50 +00:00
|
|
|
self.logger.debug("SD-v2-e model detected")
|
improve importation and conversion of legacy checkpoint files
A long-standing issue with importing legacy checkpoints (both ckpt and
safetensors) is that the user has to identify the correct config file,
either by providing its path or by selecting which type of model the
checkpoint is (e.g. "v1 inpainting"). In addition, some users wish to
provide custom VAEs for use with the model. Currently this is done in
the WebUI by importing the model, editing it, and then typing in the
path to the VAE.
To improve the user experience, the model manager's
`heuristic_import()` method has been enhanced as follows:
1. When initially called, the caller can pass a config file path, in
which case it will be used.
2. If no config file provided, the method looks for a .yaml file in the
same directory as the model which bears the same basename. e.g.
```
my-new-model.safetensors
my-new-model.yaml
```
The yaml file is then used as the configuration file for
importation and conversion.
3. If no such file is found, then the method opens up the checkpoint
and probes it to determine whether it is V1, V1-inpaint or V2.
If it is a V1 format, then the appropriate v1-inference.yaml config
file is used. Unfortunately there are two V2 variants that cannot be
distinguished by introspection.
4. If the probe algorithm is unable to determine the model type, then its
last-ditch effort is to execute an optional callback function that can
be provided by the caller. This callback, named `config_file_callback`
receives the path to the legacy checkpoint and returns the path to the
config file to use. The CLI uses to put up a multiple choice prompt to
the user. The WebUI **could** use this to prompt the user to choose
from a radio-button selection.
5. If the config file cannot be determined, then the import is abandoned.
The user can attach a custom VAE to the imported and converted model
by copying the desired VAE into the same directory as the file to be
imported, and giving it the same basename. E.g.:
```
my-new-model.safetensors
my-new-model.vae.pt
```
For this to work, the VAE must end with ".vae.pt", ".vae.ckpt", or
".vae.safetensors". The indicated VAE will be converted into diffusers
format and stored with the converted models file, so the ".pt" file
can be deleted after conversion.
No facility is currently provided to swap a diffusers VAE at import
time, but this can be done after the fact using the WebUI and CLI's
model editing functions.
2023-03-27 15:27:45 +00:00
|
|
|
model_config_file = Path(
|
|
|
|
Globals.root, "configs/stable-diffusion/v2-inference.yaml"
|
|
|
|
)
|
|
|
|
elif model_type == SDLegacyType.V2:
|
2023-04-29 14:48:50 +00:00
|
|
|
self.logger.warning(
|
2023-04-11 13:33:28 +00:00
|
|
|
f"{thing} is a V2 checkpoint file, but its parameterization cannot be determined. Please provide configuration file path."
|
improve importation and conversion of legacy checkpoint files
A long-standing issue with importing legacy checkpoints (both ckpt and
safetensors) is that the user has to identify the correct config file,
either by providing its path or by selecting which type of model the
checkpoint is (e.g. "v1 inpainting"). In addition, some users wish to
provide custom VAEs for use with the model. Currently this is done in
the WebUI by importing the model, editing it, and then typing in the
path to the VAE.
To improve the user experience, the model manager's
`heuristic_import()` method has been enhanced as follows:
1. When initially called, the caller can pass a config file path, in
which case it will be used.
2. If no config file provided, the method looks for a .yaml file in the
same directory as the model which bears the same basename. e.g.
```
my-new-model.safetensors
my-new-model.yaml
```
The yaml file is then used as the configuration file for
importation and conversion.
3. If no such file is found, then the method opens up the checkpoint
and probes it to determine whether it is V1, V1-inpaint or V2.
If it is a V1 format, then the appropriate v1-inference.yaml config
file is used. Unfortunately there are two V2 variants that cannot be
distinguished by introspection.
4. If the probe algorithm is unable to determine the model type, then its
last-ditch effort is to execute an optional callback function that can
be provided by the caller. This callback, named `config_file_callback`
receives the path to the legacy checkpoint and returns the path to the
config file to use. The CLI uses to put up a multiple choice prompt to
the user. The WebUI **could** use this to prompt the user to choose
from a radio-button selection.
5. If the config file cannot be determined, then the import is abandoned.
The user can attach a custom VAE to the imported and converted model
by copying the desired VAE into the same directory as the file to be
imported, and giving it the same basename. E.g.:
```
my-new-model.safetensors
my-new-model.vae.pt
```
For this to work, the VAE must end with ".vae.pt", ".vae.ckpt", or
".vae.safetensors". The indicated VAE will be converted into diffusers
format and stored with the converted models file, so the ".pt" file
can be deleted after conversion.
No facility is currently provided to swap a diffusers VAE at import
time, but this can be done after the fact using the WebUI and CLI's
model editing functions.
2023-03-27 15:27:45 +00:00
|
|
|
)
|
|
|
|
return
|
|
|
|
else:
|
2023-04-29 14:48:50 +00:00
|
|
|
self.logger.warning(
|
2023-04-11 13:33:28 +00:00
|
|
|
f"{thing} is a legacy checkpoint file but not a known Stable Diffusion model. Please provide configuration file path."
|
improve importation and conversion of legacy checkpoint files
A long-standing issue with importing legacy checkpoints (both ckpt and
safetensors) is that the user has to identify the correct config file,
either by providing its path or by selecting which type of model the
checkpoint is (e.g. "v1 inpainting"). In addition, some users wish to
provide custom VAEs for use with the model. Currently this is done in
the WebUI by importing the model, editing it, and then typing in the
path to the VAE.
To improve the user experience, the model manager's
`heuristic_import()` method has been enhanced as follows:
1. When initially called, the caller can pass a config file path, in
which case it will be used.
2. If no config file provided, the method looks for a .yaml file in the
same directory as the model which bears the same basename. e.g.
```
my-new-model.safetensors
my-new-model.yaml
```
The yaml file is then used as the configuration file for
importation and conversion.
3. If no such file is found, then the method opens up the checkpoint
and probes it to determine whether it is V1, V1-inpaint or V2.
If it is a V1 format, then the appropriate v1-inference.yaml config
file is used. Unfortunately there are two V2 variants that cannot be
distinguished by introspection.
4. If the probe algorithm is unable to determine the model type, then its
last-ditch effort is to execute an optional callback function that can
be provided by the caller. This callback, named `config_file_callback`
receives the path to the legacy checkpoint and returns the path to the
config file to use. The CLI uses to put up a multiple choice prompt to
the user. The WebUI **could** use this to prompt the user to choose
from a radio-button selection.
5. If the config file cannot be determined, then the import is abandoned.
The user can attach a custom VAE to the imported and converted model
by copying the desired VAE into the same directory as the file to be
imported, and giving it the same basename. E.g.:
```
my-new-model.safetensors
my-new-model.vae.pt
```
For this to work, the VAE must end with ".vae.pt", ".vae.ckpt", or
".vae.safetensors". The indicated VAE will be converted into diffusers
format and stored with the converted models file, so the ".pt" file
can be deleted after conversion.
No facility is currently provided to swap a diffusers VAE at import
time, but this can be done after the fact using the WebUI and CLI's
model editing functions.
2023-03-27 15:27:45 +00:00
|
|
|
)
|
|
|
|
return
|
|
|
|
|
|
|
|
if not model_config_file and config_file_callback:
|
|
|
|
model_config_file = config_file_callback(model_path)
|
|
|
|
|
|
|
|
# despite our best efforts, we could not find a model config file, so give up
|
|
|
|
if not model_config_file:
|
|
|
|
return
|
|
|
|
|
|
|
|
# look for a custom vae, a like-named file ending with .vae in the same directory
|
|
|
|
vae_path = None
|
|
|
|
for suffix in ["pt", "ckpt", "safetensors"]:
|
|
|
|
if (model_path.with_suffix(f".vae.{suffix}")).exists():
|
|
|
|
vae_path = model_path.with_suffix(f".vae.{suffix}")
|
2023-04-29 14:48:50 +00:00
|
|
|
self.logger.debug(f"Using VAE file {vae_path.name}")
|
improve importation and conversion of legacy checkpoint files
A long-standing issue with importing legacy checkpoints (both ckpt and
safetensors) is that the user has to identify the correct config file,
either by providing its path or by selecting which type of model the
checkpoint is (e.g. "v1 inpainting"). In addition, some users wish to
provide custom VAEs for use with the model. Currently this is done in
the WebUI by importing the model, editing it, and then typing in the
path to the VAE.
To improve the user experience, the model manager's
`heuristic_import()` method has been enhanced as follows:
1. When initially called, the caller can pass a config file path, in
which case it will be used.
2. If no config file provided, the method looks for a .yaml file in the
same directory as the model which bears the same basename. e.g.
```
my-new-model.safetensors
my-new-model.yaml
```
The yaml file is then used as the configuration file for
importation and conversion.
3. If no such file is found, then the method opens up the checkpoint
and probes it to determine whether it is V1, V1-inpaint or V2.
If it is a V1 format, then the appropriate v1-inference.yaml config
file is used. Unfortunately there are two V2 variants that cannot be
distinguished by introspection.
4. If the probe algorithm is unable to determine the model type, then its
last-ditch effort is to execute an optional callback function that can
be provided by the caller. This callback, named `config_file_callback`
receives the path to the legacy checkpoint and returns the path to the
config file to use. The CLI uses to put up a multiple choice prompt to
the user. The WebUI **could** use this to prompt the user to choose
from a radio-button selection.
5. If the config file cannot be determined, then the import is abandoned.
The user can attach a custom VAE to the imported and converted model
by copying the desired VAE into the same directory as the file to be
imported, and giving it the same basename. E.g.:
```
my-new-model.safetensors
my-new-model.vae.pt
```
For this to work, the VAE must end with ".vae.pt", ".vae.ckpt", or
".vae.safetensors". The indicated VAE will be converted into diffusers
format and stored with the converted models file, so the ".pt" file
can be deleted after conversion.
No facility is currently provided to swap a diffusers VAE at import
time, but this can be done after the fact using the WebUI and CLI's
model editing functions.
2023-03-27 15:27:45 +00:00
|
|
|
vae = None if vae_path else dict(repo_id="stabilityai/sd-vae-ft-mse")
|
2023-03-03 06:02:00 +00:00
|
|
|
|
2023-03-03 05:02:15 +00:00
|
|
|
diffuser_path = Path(
|
|
|
|
Globals.root, "models", Globals.converted_ckpts_dir, model_path.stem
|
|
|
|
)
|
2023-05-09 03:39:44 +00:00
|
|
|
with SilenceWarnings():
|
|
|
|
model_name = self.convert_and_import(
|
|
|
|
model_path,
|
|
|
|
diffusers_path=diffuser_path,
|
|
|
|
vae=vae,
|
|
|
|
vae_path=str(vae_path),
|
|
|
|
model_name=model_name,
|
|
|
|
model_description=description,
|
|
|
|
original_config_file=model_config_file,
|
|
|
|
commit_to_conf=commit_to_conf,
|
|
|
|
scan_needed=False,
|
|
|
|
)
|
2023-02-28 05:31:15 +00:00
|
|
|
return model_name
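Putting the pieces together, a caller might drive the import like this.
This is a hedged sketch: the exact `heuristic_import()` signature is assumed
from the parameters visible above (`config_file_callback`, `commit_to_conf`),
and the paths are illustrative.
```
from pathlib import Path

# `manager` is assumed to be an existing ModelManager instance
model_name = manager.heuristic_import(
    Path('my-new-model.safetensors'),
    config_file_callback=choose_config_file,  # from the sketch above
    commit_to_conf=Path('./configs/models.yaml'),
)
if model_name:
    print(f"imported as {model_name}")
```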
    def convert_ckpt_and_cache(self, mconfig: DictConfig) -> Path:
        """
        Convert the checkpoint model indicated in mconfig into
        diffusers format, cache it to disk, and return the Path to
        the converted model. If it is already on disk, just return
        the Path.
        """
        weights = global_resolve_path(mconfig.weights)
        config_file = global_resolve_path(mconfig.config)
        diffusers_path = global_resolve_path(Path('models', Globals.converted_ckpts_dir)) / weights.stem

        # return the cached version if it exists
        if diffusers_path.exists():
            return diffusers_path

        vae_ckpt_path, vae_model = self._get_vae_for_conversion(weights, mconfig)
        # imported here to avoid circular import errors
        from .convert_ckpt_to_diffusers import convert_ckpt_to_diffusers
        with SilenceWarnings():
            convert_ckpt_to_diffusers(
                weights,
                diffusers_path,
                extract_ema=True,
                original_config_file=config_file,
                vae=vae_model,
                vae_path=str(global_resolve_path(vae_ckpt_path)) if vae_ckpt_path else None,
                scan_needed=True,
            )
        return diffusers_path
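For reference, a hedged example of the kind of models.yaml stanza that
`convert_ckpt_and_cache()` consumes. The field names (`weights`, `config`,
`vae`) are the ones read by the code above; the model name and paths are
illustrative.
```
ckpt/my-fine-tune:
  format: ckpt
  weights: models/ldm/stable-diffusion-v1/my-fine-tune.ckpt
  config: configs/stable-diffusion/v1-inference.yaml
  vae: models/ldm/stable-diffusion-v1/my-fine-tune.vae.pt  # optional; see below
  description: An illustrative fine-tuned checkpoint
```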
    def _get_vae_for_conversion(
        self,
        weights: Path,
        mconfig: DictConfig,
    ) -> Tuple[Optional[Path], Optional[AutoencoderKL]]:
        # VAE handling is convoluted
        # 1. If there is a .vae.{pt,ckpt,safetensors} file sharing the same stem
        # as weights, use it as the vae_path passed to the converter
        vae_ckpt_path = None
        vae_diffusers_location = None
        vae_model = None
        for suffix in ["pt", "ckpt", "safetensors"]:
            if (weights.with_suffix(f".vae.{suffix}")).exists():
                vae_ckpt_path = weights.with_suffix(f".vae.{suffix}")
                self.logger.debug(f"Using VAE file {vae_ckpt_path.name}")
        if vae_ckpt_path:
            return (vae_ckpt_path, None)

        # 2. If mconfig has a vae weights path, use that as vae_path
        vae_config = mconfig.get('vae')
        if vae_config and isinstance(vae_config, str):
            vae_ckpt_path = Path(vae_config)
            return (vae_ckpt_path, None)

        # 3. If mconfig has a vae dict, use it as the diffusers-style vae
        if vae_config and isinstance(vae_config, DictConfig):
            vae_diffusers_location = global_resolve_path(vae_config.get('path')) or vae_config.get('repo_id')
        # 4. Otherwise, use stabilityai/sd-vae-ft-mse "because it works"
        else:
            vae_diffusers_location = "stabilityai/sd-vae-ft-mse"

        if vae_diffusers_location:
            vae_model = self.cache.get_model(vae_diffusers_location, SDModelType.Vae).model
            return (None, vae_model)

        return (None, None)
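The configuration-side VAE forms that `_get_vae_for_conversion()` recognizes,
in priority order after the sidecar-file check, shown as hedged models.yaml
fragments (alternatives, not one document; names and paths illustrative):
```
# 2. a string: treated as a path to VAE checkpoint weights
vae: models/ldm/stable-diffusion-v1/my-fine-tune.vae.pt

# 3. a dict: treated as a diffusers-style VAE, by local path or repo_id
vae:
  repo_id: stabilityai/sd-vae-ft-mse

# 4. no vae key at all: stabilityai/sd-vae-ft-mse is used as the default
```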
    def convert_and_import(
        self,
        ckpt_path: Path,
        diffusers_path: Path,
        model_name=None,
        model_description=None,
        vae: dict = None,
        vae_path: Path = None,
        original_config_file: Path = None,
        commit_to_conf: Path = None,
        scan_needed: bool = True,
    ) -> str:
        """
        Convert a legacy ckpt weights file to a diffusers model and
        import it into models.yaml.
        """
        ckpt_path = self._resolve_path(ckpt_path, "models/ldm/stable-diffusion-v1")
        if original_config_file:
            original_config_file = self._resolve_path(
                original_config_file, "configs/stable-diffusion"
            )

        new_config = None

        if diffusers_path.exists():
            self.logger.error(
                f"The path {str(diffusers_path)} already exists. Please move or remove it and try again."
            )
            return

        model_name = model_name or diffusers_path.name
        model_description = model_description or f"Converted version of {model_name}"
        self.logger.debug(f"Converting {model_name} to diffusers (30-60s)")

        # imported here to avoid circular import errors
        from .convert_ckpt_to_diffusers import convert_ckpt_to_diffusers

        try:
            # By passing the specified VAE to the conversion function, the autoencoder
            # will be built into the model rather than tacked on afterward via the config file
            vae_model = None
            if vae:
                vae_location = global_resolve_path(vae.get('path')) or vae.get('repo_id')
                vae_model = self.cache.get_model(vae_location, SDModelType.Vae).model
                vae_path = None
            convert_ckpt_to_diffusers(
                ckpt_path,
                diffusers_path,
                extract_ema=True,
                original_config_file=original_config_file,
                vae=vae_model,
                vae_path=vae_path,
                scan_needed=scan_needed,
            )
            self.logger.debug(
                f"Success. Converted model is now located at {str(diffusers_path)}"
            )
            self.logger.debug(f"Writing new config file entry for {model_name}")
            new_config = dict(
                path=str(diffusers_path),
                description=model_description,
                format="diffusers",
            )
            if self.model_exists(model_name, SDModelType.Diffusers):
                self.del_model(model_name, SDModelType.Diffusers)
            self.add_model(
                model_name,
                SDModelType.Diffusers,
                new_config,
                True,
            )
            if commit_to_conf:
                self.commit(commit_to_conf)
            self.logger.debug("Conversion succeeded")
        except Exception as e:
            self.logger.warning(f"Conversion failed: {str(e)}")
            self.logger.warning(
                "If you are trying to convert an inpainting or 2.X model, please indicate the correct config file (e.g. v1-inpainting-inference.yaml)"
            )

        return model_name
    def search_models(self, search_folder):
        self.logger.info(f"Finding Models In: {search_folder}")
        models_folder_ckpt = Path(search_folder).glob("**/*.ckpt")
        models_folder_safetensors = Path(search_folder).glob("**/*.safetensors")

        ckpt_files = [x for x in models_folder_ckpt if x.is_file()]
        safetensor_files = [x for x in models_folder_safetensors if x.is_file()]

        files = ckpt_files + safetensor_files

        found_models = []
        for file in files:
            location = str(file.resolve()).replace("\\", "/")
            if (
                "model.safetensors" not in location
                and "diffusion_pytorch_model.safetensors" not in location
            ):
                found_models.append({"name": file.stem, "location": location})

        return search_folder, found_models
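A short usage sketch; the folder path is illustrative and `manager` is
assumed to be an existing ModelManager instance.
```
search_folder, found = manager.search_models('/data/checkpoints')
for model in found:
    print(f"{model['name']}: {model['location']}")
```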
    def commit(self, conf_file: Path = None) -> None:
        """
        Write the current configuration out to the indicated file.
        """
        yaml_str = OmegaConf.to_yaml(self.config)
        config_file_path = conf_file or self.config_path
        assert config_file_path is not None, 'no config file path to write to'
        tmpfile = os.path.join(os.path.dirname(config_file_path), "new_config.tmp")
        # write to a temporary file first, then atomically replace the original
        with open(tmpfile, "w", encoding="utf-8") as outfile:
            outfile.write(self.preamble())
            outfile.write(yaml_str)
        os.replace(tmpfile, config_file_path)

    def preamble(self) -> str:
        """
        Returns the preamble for the config file.
        """
        return textwrap.dedent(
            """\
            # This file describes the alternative machine learning models
            # available to InvokeAI script.
            """
        )
    @classmethod
    def _delete_model_from_cache(cls, repo_id):
        cache_info = scan_cache_dir(global_cache_dir("hub"))

        # I'm sure there is a way to do this with comprehensions
        # but the code quickly became incomprehensible!
        hashes_to_delete = set()
        for repo in cache_info.repos:
            if repo.repo_id == repo_id:
                for revision in repo.revisions:
                    hashes_to_delete.add(revision.commit_hash)
        strategy = cache_info.delete_revisions(*hashes_to_delete)
        cls.logger.warning(
            f"Deletion of this model is expected to free {strategy.expected_freed_size_str}"
        )
        strategy.execute()
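`scan_cache_dir()` and `delete_revisions()` come from the huggingface_hub
cache-management API. A minimal dry-run sketch that inspects the cache
without deleting anything (the default cache location is assumed):
```
from huggingface_hub import scan_cache_dir

cache_info = scan_cache_dir()  # scans the default HF cache directory
for repo in cache_info.repos:
    print(repo.repo_id, repo.size_on_disk_str, f"{len(repo.revisions)} revision(s)")
```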
    @staticmethod
    def _abs_path(path: str | Path) -> Path:
        if path is None or Path(path).is_absolute():
            return path
        return Path(Globals.root, path).resolve()

    # This is not the same as global_resolve_path(), which prepends
    # Globals.root.
    def _resolve_path(
        self, source: Union[str, Path], dest_directory: str
    ) -> Optional[Path]:
        resolved_path = None
        if str(source).startswith(("http:", "https:", "ftp:")):
            dest_directory = Path(dest_directory)
            if not dest_directory.is_absolute():
                dest_directory = Globals.root / dest_directory
            dest_directory.mkdir(parents=True, exist_ok=True)
            resolved_path = download_with_resume(str(source), dest_directory)
        else:
            if not os.path.isabs(source):
                source = os.path.join(Globals.root, source)
            resolved_path = Path(source)
        return resolved_path
    def _update_config_file_version(self):
        """
        This gets called at object init time and will update
        older versions of the config file to the current format
        as necessary.
        """
        current_version = self.config.get("config_file_version", "1.0.0")
        if version.parse(current_version) < version.parse(CONFIG_FILE_VERSION):
            self.logger.info(f'models.yaml version {current_version} detected. Updating to {CONFIG_FILE_VERSION}')

            new_config = OmegaConf.create()
            new_config["config_file_version"] = CONFIG_FILE_VERSION

            for model_key in self.config:

                old_stanza = self.config[model_key]

                # ignore the old and ugly way of associating a legacy
                # vae with a legacy checkpoint model
                if old_stanza.get("config") and '/VAE/' in old_stanza.get("config"):
                    continue

                # bare keys are updated to be prefixed with 'diffusers/'
                if '/' not in model_key:
                    new_key = f'diffusers/{model_key}'
                else:
                    new_key = model_key

                model_format = None  # guards against stanzas with an unrecognized format
                if old_stanza.get('format') == 'diffusers':
                    model_format = 'folder'
                elif old_stanza.get('weights') and Path(old_stanza.get('weights')).suffix == '.ckpt':
                    model_format = 'ckpt'
                elif old_stanza.get('weights') and Path(old_stanza.get('weights')).suffix == '.safetensors':
                    model_format = 'safetensors'

                # copy fields over manually rather than doing a copy() or deepcopy()
                # in order to avoid bringing in unwanted fields.
                new_config[new_key] = dict(
                    description=old_stanza.get('description'),
                    format=model_format,
                )
                for field in ["repo_id", "path", "weights", "config", "vae"]:
                    if field_value := old_stanza.get(field):
                        new_config[new_key].update({field: field_value})

            self.config = new_config
            if self.config_path:
                self.commit()
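To make the migration concrete, a hedged before/after pair (model name and
path illustrative). A bare 1.0.0-style stanza such as:
```
my-old-model:
  format: diffusers
  path: /path/to/my-old-model
  description: An old-style entry
```
would be rewritten under the current scheme as:
```
config_file_version: 3.0.0   # whatever CONFIG_FILE_VERSION is at migration time
diffusers/my-old-model:
  format: folder
  path: /path/to/my-old-model
  description: An old-style entry
```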