Merge remote-tracking branch 'origin/main' into refactor/model_manager_instantiate

# Conflicts:
#	invokeai/backend/model_management/model_manager.py
Kevin Turner
2023-07-30 11:33:13 -07:00
22 changed files with 342 additions and 311 deletions

View File

@ -181,7 +181,7 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
def download_conversion_models():
target_dir = config.root_path / "models/core/convert"
target_dir = config.models_path / "core/convert"
kwargs = dict() # for future use
try:
logger.info("Downloading core tokenizers and text encoders")

View File

@ -7,7 +7,7 @@ import warnings
from dataclasses import dataclass, field
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Dict, Callable, Union, Set
from typing import List, Dict, Callable, Union, Set, Optional
import requests
from diffusers import DiffusionPipeline
@ -128,7 +128,9 @@ class ModelInstall(object):
model_dict[key] = ModelLoadInfo(**value)
# supplement with entries in models.yaml
installed_models = self.mgr.list_models()
installed_models = [x for x in self.mgr.list_models()]
# suppresses autoloaded models
# installed_models = [x for x in self.mgr.list_models() if not self._is_autoloaded(x)]
for md in installed_models:
base = md["base_model"]
@ -147,6 +149,17 @@ class ModelInstall(object):
)
return {x: model_dict[x] for x in sorted(model_dict.keys(), key=lambda y: model_dict[y].name.lower())}
def _is_autoloaded(self, model_info: dict) -> bool:
path = model_info.get("path")
if not path:
return False
for autodir in ["autoimport_dir", "lora_dir", "embedding_dir", "controlnet_dir"]:
if autodir_path := getattr(self.config, autodir):
autodir_path = self.config.root_path / autodir_path
if Path(path).is_relative_to(autodir_path):
return True
return False
def list_models(self, model_type):
installed = self.mgr.list_models(model_type=model_type)
print(f"Installed models of type `{model_type}`:")
@ -273,6 +286,7 @@ class ModelInstall(object):
logger.error(f"Unable to download {url}. Skipping.")
info = ModelProbe().heuristic_probe(location)
dest = self.config.models_path / info.base_type.value / info.model_type.value / location.name
dest.parent.mkdir(parents=True, exist_ok=True)
models_path = shutil.move(location, dest)
# staged version will be garbage-collected at this time
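The added dest.parent.mkdir(...) guards shutil.move, which fails when the destination's parent directory does not exist yet. A self-contained sketch of the pattern:

import shutil
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as td:
    src = Path(td) / "model.safetensors"
    src.write_bytes(b"")  # stand-in for a downloaded model file

    # Nested destination whose parent directories do not exist yet.
    dest = Path(td) / "sd-1" / "main" / src.name
    dest.parent.mkdir(parents=True, exist_ok=True)  # the added guard

    moved = shutil.move(str(src), str(dest))  # returns the final path
    print(moved)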
@ -346,7 +360,7 @@ class ModelInstall(object):
if key in self.datasets:
description = self.datasets[key].get("description") or description
rel_path = self.relative_to_root(path)
rel_path = self.relative_to_root(path, self.config.models_path)
attributes = dict(
path=str(rel_path),
@ -386,8 +400,8 @@ class ModelInstall(object):
attributes.update(dict(config=str(legacy_conf)))
return attributes
def relative_to_root(self, path: Path) -> Path:
root = self.config.root_path
def relative_to_root(self, path: Path, root: Optional[Path] = None) -> Path:
root = root or self.config.root_path
if path.is_relative_to(root):
return path.relative_to(root)
else:
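relative_to_root now takes an optional root so callers can relativize against models_path instead of root_path. A sketch of the contract (the fallback root here is a hypothetical stand-in for self.config.root_path):

from pathlib import Path
from typing import Optional

DEFAULT_ROOT = Path("/invokeai")  # stand-in for self.config.root_path

def relative_to_root(path: Path, root: Optional[Path] = None) -> Path:
    root = root or DEFAULT_ROOT
    if path.is_relative_to(root):
        return path.relative_to(root)
    return path  # paths outside the root pass through unchanged

print(relative_to_root(Path("/invokeai/models/sd-1/main/foo"),
                       root=Path("/invokeai/models")))   # sd-1/main/foo
print(relative_to_root(Path("/elsewhere/foo")))          # /elsewhere/foo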

View File

@ -63,7 +63,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionS
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.app.services.config import InvokeAIAppConfig, MODEL_CORE
from invokeai.app.services.config import InvokeAIAppConfig
from picklescan.scanner import scan_file_path
from .models import BaseModelType, ModelVariantType
@ -81,7 +81,7 @@ if is_accelerate_available():
from accelerate.utils import set_module_tensor_to_device
logger = InvokeAILogger.getLogger(__name__)
CONVERT_MODEL_ROOT = InvokeAIAppConfig.get_config().root_path / MODEL_CORE / "convert"
CONVERT_MODEL_ROOT = InvokeAIAppConfig.get_config().models_path / "core/convert"
def shave_segments(path, n_shave_prefix_segments=1):
@ -1070,7 +1070,7 @@ def convert_controlnet_checkpoint(
extract_ema,
use_linear_projection=None,
cross_attention_dim=None,
precision: torch.dtype = torch.float32,
precision: Optional[torch.dtype] = None,
):
ctrlnet_config = create_unet_diffusers_config(original_config, image_size=image_size, controlnet=True)
ctrlnet_config["upcast_attention"] = upcast_attention
@ -1111,7 +1111,6 @@ def convert_controlnet_checkpoint(
return controlnet.to(precision)
# TO DO - PASS PRECISION
def download_from_original_stable_diffusion_ckpt(
checkpoint_path: str,
model_version: BaseModelType,
@ -1121,7 +1120,7 @@ def download_from_original_stable_diffusion_ckpt(
prediction_type: str = None,
model_type: str = None,
extract_ema: bool = False,
precision: torch.dtype = torch.float32,
precision: Optional[torch.dtype] = None,
scheduler_type: str = "pndm",
num_in_channels: Optional[int] = None,
upcast_attention: Optional[bool] = None,
@ -1194,6 +1193,8 @@ def download_from_original_stable_diffusion_ckpt(
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer)
to use. If this parameter is `None`, the function will load a new instance of [CLIPTokenizer] by itself, if
needed.
precision (`torch.dtype`, *optional*, defaults to `None`):
If not provided, the precision will be set to the precision of the original file.
return: A StableDiffusionPipeline object representing the passed-in `.ckpt`/`.safetensors` file.
"""
@ -1252,6 +1253,10 @@ def download_from_original_stable_diffusion_ckpt(
logger.debug(f"model_type = {model_type}; original_config_file = {original_config_file}")
precision_probing_key = "model.diffusion_model.input_blocks.0.0.bias"
logger.debug(f"original checkpoint precision == {checkpoint[precision_probing_key].dtype}")
precision = precision or checkpoint[precision_probing_key].dtype
if original_config_file is None:
key_name_v2_1 = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
key_name_sd_xl_base = "conditioner.embedders.1.model.transformer.resblocks.9.mlp.c_proj.bias"
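The new precision default is probed from the checkpoint itself: the dtype of one known tensor stands in for the file's overall precision. A sketch with a fake state dict, using the probe key from the hunk above:

import torch

# Fake checkpoint: a state dict holding fp16 tensors.
probe_key = "model.diffusion_model.input_blocks.0.0.bias"
checkpoint = {probe_key: torch.zeros(320, dtype=torch.float16)}

precision = None  # i.e. the caller did not pass an explicit precision
precision = precision or checkpoint[probe_key].dtype
print(precision)  # torch.float16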
@ -1279,9 +1284,12 @@ def download_from_original_stable_diffusion_ckpt(
original_config_file = BytesIO(requests.get(config_url).content)
original_config = OmegaConf.load(original_config_file)
if original_config["model"]["params"].get("use_ema") is not None:
extract_ema = original_config["model"]["params"]["use_ema"]
if (
model_version == BaseModelType.StableDiffusion2
and original_config["model"]["params"]["parameterization"] == "v"
and original_config["model"]["params"].get("parameterization") == "v"
):
prediction_type = "v_prediction"
upcast_attention = True
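Switching to .get("parameterization") makes the v-prediction check safe for configs that lack the key, such as SD-1.x YAMLs. A sketch with OmegaConf and a hypothetical config:

from omegaconf import OmegaConf

# Hypothetical SD-1.x-style config: no "parameterization" key at all.
original_config = OmegaConf.create({"model": {"params": {"use_ema": False}}})
params = original_config["model"]["params"]

print(params.get("parameterization") == "v")  # False -- no exception
# params["parameterization"] would raise here instead of returning None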
@ -1447,7 +1455,7 @@ def download_from_original_stable_diffusion_ckpt(
if controlnet:
pipe = pipeline_class(
vae=vae.to(precision),
text_encoder=text_model,
text_encoder=text_model.to(precision),
tokenizer=tokenizer,
unet=unet.to(precision),
scheduler=scheduler,
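The recurring fix in these hunks is casting every sub-model, not just the VAE and UNet, with .to(precision), so the assembled pipeline is uniformly typed. nn.Module.to accepts a dtype directly:

import torch

text_encoder = torch.nn.Linear(8, 8)  # stand-in for the real text encoder
print(text_encoder.weight.dtype)      # torch.float32

text_encoder = text_encoder.to(torch.float16)  # casts all parameters
print(text_encoder.weight.dtype)      # torch.float16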
@ -1459,7 +1467,7 @@ def download_from_original_stable_diffusion_ckpt(
else:
pipe = pipeline_class(
vae=vae.to(precision),
text_encoder=text_model,
text_encoder=text_model.to(precision),
tokenizer=tokenizer,
unet=unet.to(precision),
scheduler=scheduler,
@ -1484,8 +1492,8 @@ def download_from_original_stable_diffusion_ckpt(
image_noising_scheduler=image_noising_scheduler,
# regular denoising components
tokenizer=tokenizer,
text_encoder=text_model,
unet=unet,
text_encoder=text_model.to(precision),
unet=unet.to(precision),
scheduler=scheduler,
# vae
vae=vae,
@ -1560,7 +1568,7 @@ def download_from_original_stable_diffusion_ckpt(
if controlnet:
pipe = pipeline_class(
vae=vae.to(precision),
text_encoder=text_model,
text_encoder=text_model.to(precision),
tokenizer=tokenizer,
unet=unet.to(precision),
controlnet=controlnet,
@ -1571,7 +1579,7 @@ def download_from_original_stable_diffusion_ckpt(
else:
pipe = pipeline_class(
vae=vae.to(precision),
text_encoder=text_model,
text_encoder=text_model.to(precision),
tokenizer=tokenizer,
unet=unet.to(precision),
scheduler=scheduler,
@ -1594,9 +1602,9 @@ def download_from_original_stable_diffusion_ckpt(
pipe = StableDiffusionXLPipeline(
vae=vae.to(precision),
text_encoder=text_encoder,
text_encoder=text_encoder.to(precision),
tokenizer=tokenizer,
text_encoder_2=text_encoder_2,
text_encoder_2=text_encoder_2.to(precision),
tokenizer_2=tokenizer_2,
unet=unet.to(precision),
scheduler=scheduler,
@ -1639,7 +1647,7 @@ def download_controlnet_from_original_ckpt(
original_config_file: str,
image_size: int = 512,
extract_ema: bool = False,
precision: torch.dtype = torch.float32,
precision: Optional[torch.dtype] = None,
num_in_channels: Optional[int] = None,
upcast_attention: Optional[bool] = None,
device: str = None,
@ -1680,6 +1688,12 @@ def download_controlnet_from_original_ckpt(
while "state_dict" in checkpoint:
checkpoint = checkpoint["state_dict"]
# use original precision
precision_probing_key = "input_blocks.0.0.bias"
ckpt_precision = checkpoint[precision_probing_key].dtype
logger.debug(f"original controlnet precision = {ckpt_precision}")
precision = precision or ckpt_precision
original_config = OmegaConf.load(original_config_file)
if num_in_channels is not None:
@ -1699,7 +1713,7 @@ def download_controlnet_from_original_ckpt(
cross_attention_dim=cross_attention_dim,
)
return controlnet
return controlnet.to(precision)
def convert_ldm_vae_to_diffusers(checkpoint, vae_config: DictConfig, image_size: int) -> AutoencoderKL:

View File

@ -187,7 +187,9 @@ class ModelCache(object):
# TODO: lock for no copies on simultaneous calls?
cache_entry = self._cached_models.get(key, None)
if cache_entry is None:
self.logger.info(f"Loading model {model_path}, type {base_model}:{model_type}:{submodel}")
self.logger.info(
f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value if submodel else ''}"
)
# this will remove older cached models until
# there is sufficient room to load the requested model
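The reworked log line formats the enum members via .value and guards against submodel being None. A plain-Enum sketch of the difference (a simplified stand-in for the real model-type enums):

from enum import Enum

class BaseModelType(Enum):  # simplified stand-in
    StableDiffusion1 = "sd-1"

base_model = BaseModelType.StableDiffusion1
submodel = None

print(f"{base_model}")        # BaseModelType.StableDiffusion1
print(f"{base_model.value}")  # sd-1
print(f"{submodel.value if submodel else ''}")  # empty, no AttributeError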

View File

@ -426,7 +426,7 @@ class ModelManager(object):
return (model_name, base_model, model_type)
def _get_model_cache_path(self, model_path):
return self.app_config.models_path / ".cache" / hashlib.md5(str(model_path).encode()).hexdigest()
return self.resolve_model_path(Path(".cache") / hashlib.md5(str(model_path).encode()).hexdigest())
@classmethod
def initialize_model_config(cls, config_path: Path):
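The cache path is derived from an MD5 digest of the model path, now rooted via resolve_model_path. A sketch of the scheme with a hypothetical models directory:

import hashlib
from pathlib import Path

models_path = Path("/invokeai/models")  # hypothetical models_path
model_path = models_path / "sd-1" / "main" / "foo"

digest = hashlib.md5(str(model_path).encode()).hexdigest()
cache_path = models_path / ".cache" / digest
print(cache_path)  # /invokeai/models/.cache/<32-char hex digest>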
@ -520,7 +520,7 @@ class ModelManager(object):
model_path = getattr(model_config, submodel_type)
is_submodel_override = True
model_path = self.app_config.root_path / model_path
model_path = self.resolve_model_path(model_path)
return model_path, is_submodel_override
def _get_model_config(self, base_model, model_name, model_type) -> ModelConfigBase:
@ -618,7 +618,7 @@ class ModelManager(object):
# expose paths as absolute to help web UI
if path := model_dict.get("path"):
model_dict["path"] = str(self.app_config.root_path / path)
model_dict["path"] = str(self.resolve_model_path(path))
models.append(model_dict)
return models
@ -655,7 +655,7 @@ class ModelManager(object):
self.cache.uncache_model(cache_id)
# if model inside invoke models folder - delete files
model_path = self.app_config.root_path / model_cfg.path
model_path = self.resolve_model_path(model_cfg.path)
cache_path = self._get_model_cache_path(model_path)
if cache_path.exists():
rmtree(str(cache_path))
@ -686,10 +686,9 @@ class ModelManager(object):
The returned dict has the same format as the dict returned by
model_info().
"""
# relativize paths as they go in - this makes it easier to move the root directory around
# relativize paths as they go in - this makes it easier to move the models directory around
if path := model_attributes.get("path"):
if Path(path).is_relative_to(self.app_config.root_path):
model_attributes["path"] = str(Path(path).relative_to(self.app_config.root_path))
model_attributes["path"] = str(self.relative_model_path(Path(path)))
model_class = self._get_implementation(base_model, model_type)
model_config = model_class.create_config(**model_attributes)
@ -747,7 +746,7 @@ class ModelManager(object):
if not model_cfg:
raise ModelNotFoundException(f"Unknown model: {model_key}")
old_path = self.app_config.root_path / model_cfg.path
old_path = self.resolve_model_path(model_cfg.path)
new_name = new_name or model_name
new_base = new_base or base_model
new_key = self.create_key(new_name, new_base, model_type)
@ -756,15 +755,15 @@ class ModelManager(object):
# if this is a model file/directory that we manage ourselves, we need to move it
if old_path.is_relative_to(self.app_config.models_path):
new_path = (
self.app_config.root_path
/ "models"
/ BaseModelType(new_base).value
/ ModelType(model_type).value
/ new_name
new_path = self.resolve_model_path(
Path(
BaseModelType(new_base).value,
ModelType(model_type).value,
new_name,
)
)
move(old_path, new_path)
model_cfg.path = str(new_path.relative_to(self.app_config.root_path))
model_cfg.path = str(new_path.relative_to(self.app_config.models_path))
# clean up caches
old_model_cache = self._get_model_cache_path(old_path)
@ -814,7 +813,7 @@ class ModelManager(object):
**submodel,
)
checkpoint_path = self.app_config.root_path / info["path"]
old_diffusers_path = self.app_config.models_path / model.location
old_diffusers_path = self.resolve_model_path(model.location)
new_diffusers_path = (
dest_directory or self.app_config.models_path / base_model.value / model_type.value
) / model_name
@ -827,7 +826,7 @@ class ModelManager(object):
info["path"] = (
str(new_diffusers_path)
if dest_directory
else str(new_diffusers_path.relative_to(self.app_config.root_path))
else str(new_diffusers_path.relative_to(self.app_config.models_path))
)
info.pop("config")
@ -842,6 +841,15 @@ class ModelManager(object):
return result
def resolve_model_path(self, path: Union[Path, str]) -> Path:
"""return relative paths based on configured models_path"""
return self.app_config.models_path / path
def relative_model_path(self, model_path: Path) -> Path:
if model_path.is_relative_to(self.app_config.models_path):
model_path = model_path.relative_to(self.app_config.models_path)
return model_path
def search_models(self, search_folder):
self.logger.info(f"Finding Models In: {search_folder}")
models_folder_ckpt = Path(search_folder).glob("**/*.ckpt")
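Together, the two new helpers above (resolve_model_path and relative_model_path) define a round trip: paths are stored models-dir relative and resolved back to absolute on use. A sketch of that contract, with a hypothetical models_path:

from pathlib import Path
from typing import Union

models_path = Path("/invokeai/models")  # stand-in for app_config.models_path

def resolve_model_path(path: Union[Path, str]) -> Path:
    return models_path / path  # absolute inputs pass through unchanged

def relative_model_path(model_path: Path) -> Path:
    if model_path.is_relative_to(models_path):
        model_path = model_path.relative_to(models_path)
    return model_path

stored = relative_model_path(Path("/invokeai/models/sd-1/main/foo"))
print(stored)                      # sd-1/main/foo
print(resolve_model_path(stored))  # /invokeai/models/sd-1/main/foo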
@ -915,10 +923,17 @@ class ModelManager(object):
new_models_found = False
self.logger.info(f"Scanning {self.app_config.models_path} for new models")
with Chdir(self.app_config.root_path):
with Chdir(self.app_config.models_path):
for model_key, model_config in list(self.models.items()):
model_name, cur_base_model, cur_model_type = self.parse_key(model_key)
model_path = self.app_config.root_path.absolute() / model_config.path
# Patch for relative path bug in older models.yaml - paths should
# not start with a hard-coded 'models'. This will also fix up
# models.yaml when it is committed.
if model_config.path.startswith("models"):
model_config.path = str(Path(*Path(model_config.path).parts[1:]))
model_path = self.resolve_model_path(model_config.path).absolute()
if not model_path.exists():
model_class = self._get_implementation(cur_base_model, cur_model_type)
if model_class.save_to_config:
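The legacy-path patch drops a leading hard-coded "models" segment so the remainder is relative to models_path itself. The core of it, as a sketch:

from pathlib import Path

legacy_path = "models/sd-1/main/foo"  # old-style entry from models.yaml

if legacy_path.startswith("models"):
    legacy_path = str(Path(*Path(legacy_path).parts[1:]))

print(legacy_path)  # sd-1/main/foo -- now relative to models_path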
@ -937,7 +952,7 @@ class ModelManager(object):
if model_type is not None and cur_model_type != model_type:
continue
model_class = self._get_implementation(cur_base_model, cur_model_type)
models_dir = self.app_config.models_path / cur_base_model.value / cur_model_type.value
models_dir = self.resolve_model_path(Path(cur_base_model.value, cur_model_type.value))
if not models_dir.exists():
continue # TODO: or create all folders?
@ -951,9 +966,7 @@ class ModelManager(object):
if model_key in self.models:
raise DuplicateModelException(f"Model with key {model_key} added twice")
if model_path.is_relative_to(self.app_config.root_path):
model_path = model_path.relative_to(self.app_config.root_path)
model_path = self.relative_model_path(model_path)
model_config: ModelConfigBase = model_class.probe_config(str(model_path))
self.models[model_key] = model_config
new_models_found = True
@ -964,12 +977,11 @@ class ModelManager(object):
except NotImplementedError as e:
self.logger.warning(e)
imported_models = self.autoimport()
imported_models = self.scan_autoimport_directory()
if (new_models_found or imported_models) and self.config_path:
self.commit()
def autoimport(self) -> Dict[str, AddModelResult]:
def scan_autoimport_directory(self) -> Dict[str, AddModelResult]:
"""
Scan the autoimport directory (if defined) and import new models, delete defunct models.
"""
@ -1003,7 +1015,7 @@ class ModelManager(object):
# LS: hacky
# Patch in the SD VAE from core so that it is available for use by the UI
try:
self.heuristic_import({config.root_path / "models/core/convert/sd-vae-ft-mse"})
self.heuristic_import({self.resolve_model_path("core/convert/sd-vae-ft-mse")})
except:
pass

View File

@ -17,6 +17,7 @@ from .base import (
ModelNotFoundException,
)
from invokeai.app.services.config import InvokeAIAppConfig
import invokeai.backend.util.logging as logger
class ControlNetModelFormat(str, Enum):
@ -66,7 +67,7 @@ class ControlNetModel(ModelBase):
child_type: Optional[SubModelType] = None,
):
if child_type is not None:
raise Exception("There is no child models in controlnet model")
raise Exception("There are no child models in controlnet model")
model = None
for variant in ["fp16", None]:
@ -124,9 +125,7 @@ class ControlNetModel(ModelBase):
return model_path
@classmethod
def _convert_controlnet_ckpt_and_cache(
cls,
model_path: str,
output_path: str,
base_model: BaseModelType,
@ -141,6 +140,7 @@ def _convert_controlnet_ckpt_and_cache(
weights = app_config.root_path / model_path
output_path = Path(output_path)
logger.info(f"Converting {weights} to diffusers format")
# return cached version if it exists
if output_path.exists():
return output_path
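The added comment documents the function's memoize-on-disk behavior: conversion is skipped when the output directory already exists. The shape of that pattern, as a sketch with hypothetical names:

import tempfile
from pathlib import Path

def convert_and_cache(weights: Path, output_path: Path) -> Path:
    if output_path.exists():  # return cached version if it exists
        return output_path
    output_path.mkdir(parents=True, exist_ok=True)
    # ... the real checkpoint -> diffusers conversion would write here ...
    return output_path

with tempfile.TemporaryDirectory() as td:
    out = Path(td) / "converted"
    convert_and_cache(Path("model.ckpt"), out)  # first call: converts
    convert_and_cache(Path("model.ckpt"), out)  # second call: cache hit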

View File

@ -123,6 +123,7 @@ class StableDiffusion1Model(DiffusersModel):
return _convert_ckpt_and_cache(
version=BaseModelType.StableDiffusion1,
model_config=config,
load_safety_checker=False,
output_path=output_path,
)
else:
@ -259,7 +260,7 @@ def _convert_ckpt_and_cache(
"""
app_config = InvokeAIAppConfig.get_config()
weights = app_config.root_path / model_config.path
weights = app_config.models_path / model_config.path
config_file = app_config.root_path / model_config.config
output_path = Path(output_path)