rewrite of widget display - marshalling needs rewrite

Lincoln Stein 2023-06-15 23:32:33 -04:00
parent 5c740452f6
commit ada7399753
7 changed files with 473 additions and 464 deletions

View File

@@ -16,6 +16,7 @@ import shutil
import textwrap
import traceback
import warnings
import yaml
from argparse import Namespace
from pathlib import Path
from shutil import get_terminal_size
@@ -25,6 +26,7 @@ from urllib import request
import npyscreen
import transformers
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from huggingface_hub import HfFolder
from huggingface_hub import login as hf_hub_login
from omegaconf import OmegaConf
@@ -34,6 +36,8 @@ from transformers import (
CLIPSegForImageSegmentation,
CLIPTextModel,
CLIPTokenizer,
AutoFeatureExtractor,
BertTokenizerFast,
)
import invokeai.configs as configs
@@ -58,6 +62,9 @@ from invokeai.backend.install.model_install_backend import (
recommended_datasets,
UserSelections,
)
from invokeai.backend.model_management.model_probe import (
ModelProbe, ModelType, BaseModelType, SchedulerPredictionType
)
warnings.filterwarnings("ignore")
transformers.logging.set_verbosity_error()
@@ -81,7 +88,7 @@ INIT_FILE_PREAMBLE = """# InvokeAI initialization file
# or renaming it and then running invokeai-configure again.
"""
logger=None
logger=InvokeAILogger.getLogger()
# --------------------------------------------
def postscript(errors: None):
@@ -162,75 +169,91 @@ class ProgressBar:
# ---------------------------------------------
def download_with_progress_bar(model_url: str, model_dest: str, label: str = "the"):
try:
print(f"Installing {label} model file {model_url}...", end="", file=sys.stderr)
logger.info(f"Installing {label} model file {model_url}...")
if not os.path.exists(model_dest):
os.makedirs(os.path.dirname(model_dest), exist_ok=True)
request.urlretrieve(
model_url, model_dest, ProgressBar(os.path.basename(model_dest))
)
print("...downloaded successfully", file=sys.stderr)
logger.info("...downloaded successfully")
else:
print("...exists", file=sys.stderr)
logger.info("...exists")
except Exception:
print("...download failed", file=sys.stderr)
print(f"Error downloading {label} model", file=sys.stderr)
logger.info("...download failed")
logger.info(f"Error downloading {label} model")
print(traceback.format_exc(), file=sys.stderr)
# ---------------------------------------------
# this will preload the Bert tokenizer files
def download_bert():
print("Installing bert tokenizer...", file=sys.stderr)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
from transformers import BertTokenizerFast
def download_conversion_models():
target_dir = config.root_path / 'models/core/convert'
kwargs = dict() # for future use
try:
logger.info('Downloading core tokenizers and text encoders')
download_from_hf(BertTokenizerFast, "bert-base-uncased")
# bert
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
bert = BertTokenizerFast.from_pretrained("bert-base-uncased", **kwargs)
bert.save_pretrained(target_dir / 'bert-base-uncased', safe_serialization=True)
# sd-1
repo_id = 'openai/clip-vit-large-patch14'
download_from_hf(CLIPTokenizer, repo_id, target_dir / 'clip-vit-large-patch14')
download_from_hf(CLIPTextModel, repo_id, target_dir / 'clip-vit-large-patch14')
# sd-2
repo_id = "stabilityai/stable-diffusion-2"
pipeline = CLIPTokenizer.from_pretrained(repo_id, subfolder="tokenizer", **kwargs)
pipeline.save_pretrained(target_dir / 'stable-diffusion-2-clip' / 'tokenizer', safe_serialization=True)
# ---------------------------------------------
def download_sd1_clip():
print("Installing SD1 clip model...", file=sys.stderr)
version = "openai/clip-vit-large-patch14"
download_from_hf(CLIPTokenizer, version)
download_from_hf(CLIPTextModel, version)
pipeline = CLIPTextModel.from_pretrained(repo_id, subfolder="text_encoder", **kwargs)
pipeline.save_pretrained(target_dir / 'stable-diffusion-2-clip' / 'text_encoder', safe_serialization=True)
# VAE
logger.info('Downloading stable diffusion VAE')
vae = AutoencoderKL.from_pretrained('stabilityai/sd-vae-ft-mse', **kwargs)
vae.save_pretrained(target_dir / 'sd-vae-ft-mse', safe_serialization=True)
# ---------------------------------------------
def download_sd2_clip():
version = "stabilityai/stable-diffusion-2"
print("Installing SD2 clip model...", file=sys.stderr)
download_from_hf(CLIPTokenizer, version, subfolder="tokenizer")
download_from_hf(CLIPTextModel, version, subfolder="text_encoder")
# safety checking
logger.info('Downloading safety checker')
repo_id = "CompVis/stable-diffusion-safety-checker"
pipeline = AutoFeatureExtractor.from_pretrained(repo_id,**kwargs)
pipeline.save_pretrained(target_dir / 'stable-diffusion-safety-checker', safe_serialization=True)
pipeline = StableDiffusionSafetyChecker.from_pretrained(repo_id,**kwargs)
pipeline.save_pretrained(target_dir / 'stable-diffusion-safety-checker', safe_serialization=True)
except KeyboardInterrupt:
raise
except Exception as e:
logger.error(str(e))
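The conversion models now live under the runtime root instead of the HF cache. A minimal sketch of loading one of them back, assuming a hypothetical root path (the actual conversion code may resolve the directory differently):
from pathlib import Path
from transformers import CLIPTokenizer
root = Path('~/invokeai').expanduser()              # hypothetical runtime root
convert_dir = root / 'models/core/convert'          # directory written above
tokenizer = CLIPTokenizer.from_pretrained(convert_dir / 'clip-vit-large-patch14')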
# ---------------------------------------------
def download_realesrgan():
print("Installing models from RealESRGAN...", file=sys.stderr)
logger.info("Installing models from RealESRGAN...")
model_url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth"
wdn_model_url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth"
model_dest = config.root_path / "models/realesrgan/realesr-general-x4v3.pth"
wdn_model_dest = config.root_path / "models/realesrgan/realesr-general-wdn-x4v3.pth"
model_dest = config.root_path / "models/core/upscaling/realesrgan/realesr-general-x4v3.pth"
wdn_model_dest = config.root_path / "models/core/upscaling/realesrgan/realesr-general-wdn-x4v3.pth"
download_with_progress_bar(model_url, str(model_dest), "RealESRGAN")
download_with_progress_bar(wdn_model_url, str(wdn_model_dest), "RealESRGANwdn")
def download_gfpgan():
print("Installing GFPGAN models...", file=sys.stderr)
logger.info("Installing GFPGAN models...")
for model in (
[
"https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth",
"./models/gfpgan/GFPGANv1.4.pth",
"./models/core/face_restoration/gfpgan/GFPGANv1.4.pth",
],
[
"https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth",
"./models/gfpgan/weights/detection_Resnet50_Final.pth",
"./models/core/face_restoration/gfpgan/weights/detection_Resnet50_Final.pth",
],
[
"https://github.com/xinntao/facexlib/releases/download/v0.2.2/parsing_parsenet.pth",
"./models/gfpgan/weights/parsing_parsenet.pth",
"./models/core/face_restoration/gfpgan/weights/parsing_parsenet.pth",
],
):
model_url, model_dest = model[0], config.root_path / model[1]
@@ -239,70 +262,32 @@ def download_gfpgan():
# ---------------------------------------------
def download_codeformer():
print("Installing CodeFormer model file...", file=sys.stderr)
logger.info("Installing CodeFormer model file...")
model_url = (
"https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth"
)
model_dest = config.root_path / "models/codeformer/codeformer.pth"
model_dest = config.root_path / "models/core/face_restoration/codeformer/codeformer.pth"
download_with_progress_bar(model_url, str(model_dest), "CodeFormer")
# ---------------------------------------------
def download_clipseg():
print("Installing clipseg model for text-based masking...", file=sys.stderr)
logger.info("Installing clipseg model for text-based masking...")
CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
try:
download_from_hf(AutoProcessor, CLIPSEG_MODEL)
download_from_hf(CLIPSegForImageSegmentation, CLIPSEG_MODEL)
download_from_hf(AutoProcessor, CLIPSEG_MODEL, config.root_path / 'models/core/misc/clipseg')
download_from_hf(CLIPSegForImageSegmentation, CLIPSEG_MODEL, config.root_path / 'models/core/misc/clipseg')
except Exception:
print("Error installing clipseg model:")
print(traceback.format_exc())
logger.info("Error installing clipseg model:")
logger.info(traceback.format_exc())
# -------------------------------------
def download_safety_checker():
print("Installing model for NSFW content detection...", file=sys.stderr)
try:
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
)
from transformers import AutoFeatureExtractor
except ModuleNotFoundError:
print("Error installing NSFW checker model:")
print(traceback.format_exc())
return
safety_model_id = "CompVis/stable-diffusion-safety-checker"
print("AutoFeatureExtractor...", file=sys.stderr)
download_from_hf(AutoFeatureExtractor, safety_model_id)
print("StableDiffusionSafetyChecker...", file=sys.stderr)
download_from_hf(StableDiffusionSafetyChecker, safety_model_id)
# -------------------------------------
def download_vaes():
print("Installing stabilityai VAE...", file=sys.stderr)
try:
# first the diffusers version
repo_id = "stabilityai/sd-vae-ft-mse"
args = dict(
cache_dir=config.cache_dir,
)
if not AutoencoderKL.from_pretrained(repo_id, **args):
raise Exception(f"download of {repo_id} failed")
repo_id = "stabilityai/sd-vae-ft-mse-original"
model_name = "vae-ft-mse-840000-ema-pruned.ckpt"
# next the legacy checkpoint version
if not hf_download_with_resume(
repo_id=repo_id,
model_name=model_name,
model_dir=str(config.root_path / Model_dir / Weights_dir),
):
raise Exception(f"download of {model_name} failed")
except Exception as e:
print(f"Error downloading StabilityAI standard VAE: {str(e)}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
def download_support_models():
download_realesrgan()
download_gfpgan()
download_codeformer()
download_clipseg()
download_conversion_models()
# -------------------------------------
def get_root(root: str = None) -> str:
@@ -657,17 +642,13 @@ def default_user_selections(program_opts: Namespace) -> UserSelections:
# -------------------------------------
def initialize_rootdir(root: Path, yes_to_all: bool = False):
print("** INITIALIZING INVOKEAI RUNTIME DIRECTORY **")
logger.info("** INITIALIZING INVOKEAI RUNTIME DIRECTORY **")
for name in (
"models",
"configs",
"embeddings",
"databases",
"loras",
"controlnets",
"text-inversion-output",
"text-inversion-training-data",
"configs"
):
os.makedirs(os.path.join(root, name), exist_ok=True)
@@ -676,6 +657,22 @@ def initialize_rootdir(root: Path, yes_to_all: bool = False):
if not os.path.samefile(configs_src, configs_dest):
shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)
dest = root / 'models'
for model_base in [BaseModelType.StableDiffusion1,BaseModelType.StableDiffusion2]:
for model_type in [ModelType.Pipeline, ModelType.Vae, ModelType.Lora,
ModelType.ControlNet,ModelType.TextualInversion]:
path = dest / model_base.value / model_type.value
path.mkdir(parents=True, exist_ok=True)
path = dest / 'core'
path.mkdir(parents=True, exist_ok=True)
with open(root / 'configs' / 'models.yaml','w') as yaml_file:
yaml_file.write(yaml.dump({'__metadata__':
{'version':'3.0.0'}
}
)
)
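For reference, yaml.dump of that mapping produces a minimal v3 models.yaml holding nothing but the metadata stanza:
__metadata__:
  version: 3.0.0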
# -------------------------------------
def run_console_ui(
@@ -837,7 +834,7 @@ def main():
old_init_file = config.root_path / 'invokeai.init'
new_init_file = config.root_path / 'invokeai.yaml'
if old_init_file.exists() and not new_init_file.exists():
print('** Migrating invokeai.init to invokeai.yaml')
logger.info('** Migrating invokeai.init to invokeai.yaml')
migrate_init_file(old_init_file)
# Load new init file into config
config.parse_args(argv=[],conf=OmegaConf.load(new_init_file))
@@ -855,29 +852,21 @@ def main():
if init_options:
write_opts(init_options, new_init_file)
else:
print(
logger.info(
'\n** CANCELLED AT USER\'S REQUEST. USE THE "invoke.sh" LAUNCHER TO RUN LATER **\n'
)
sys.exit(0)
if opt.skip_support_models:
print("\n** SKIPPING SUPPORT MODEL DOWNLOADS PER USER REQUEST **")
logger.info("SKIPPING SUPPORT MODEL DOWNLOADS PER USER REQUEST")
else:
print("\n** CHECKING/UPDATING SUPPORT MODELS **")
download_bert()
download_sd1_clip()
download_sd2_clip()
download_realesrgan()
download_gfpgan()
download_codeformer()
download_clipseg()
download_safety_checker()
download_vaes()
logger.info("CHECKING/UPDATING SUPPORT MODELS")
download_support_models()
if opt.skip_sd_weights:
print("\n** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **")
logger.info("\n** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **")
elif models_to_download:
print("\n** DOWNLOADING DIFFUSION WEIGHTS **")
logger.info("\n** DOWNLOADING DIFFUSION WEIGHTS **")
process_and_execute(opt, models_to_download)
postscript(errors=errors)

View File

@@ -9,7 +9,7 @@ import warnings
from dataclasses import dataclass,field
from pathlib import Path
from tempfile import TemporaryFile
from typing import List, Dict, Callable
from typing import List, Dict, Set, Callable
import requests
from diffusers import AutoencoderKL
@@ -20,8 +20,8 @@ from tqdm import tqdm
import invokeai.configs as configs
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.model_management import ModelManager, ModelType, BaseModelType
from ..stable_diffusion import StableDiffusionGeneratorPipeline
from ..util.logging import InvokeAILogger
@@ -62,7 +62,6 @@ class ModelInstallList:
class UserSelections():
install_models: List[str]= field(default_factory=list)
remove_models: List[str]=field(default_factory=list)
purge_deleted_models: bool=field(default_factory=list)
install_cn_models: List[str] = field(default_factory=list)
remove_cn_models: List[str] = field(default_factory=list)
install_lora_models: List[str] = field(default_factory=list)
@@ -72,6 +71,64 @@ class UserSelections():
scan_directory: Path = None
autoscan_on_startup: bool=False
import_model_paths: str=None
@dataclass
class ModelLoadInfo():
name: str
model_type: ModelType
base_type: BaseModelType
path: Path = None
repo_id: str = None
description: str = ''
installed: bool = False
recommended: bool = False
class ModelInstall(object):
def __init__(self,config:InvokeAIAppConfig):
self.config = config
self.mgr = ModelManager(config.model_conf_path)
self.datasets = OmegaConf.load(Dataset_path)
def all_models(self)->Dict[str,ModelLoadInfo]:
'''
Return dict of model_key=>ModelStatus
'''
model_dict = dict()
# first populate with the entries in INITIAL_MODELS.yaml
for key, value in self.datasets.items():
name,base,model_type = ModelManager.parse_key(key)
value['name'] = name
value['base_type'] = base
value['model_type'] = model_type
model_dict[key] = ModelLoadInfo(**value)
# supplement with entries in models.yaml
installed_models = self.mgr.list_models()
for base in installed_models.keys():
for model_type in installed_models[base].keys():
for name, value in installed_models[base][model_type].items():
key = ModelManager.create_key(name, base, model_type)
if key in model_dict:
model_dict[key].installed = True
else:
model_dict[key] = ModelLoadInfo(
name = name,
base_type = base,
model_type = model_type,
description = value.get('description'),
path = value.get('path'),
installed = True,
)
return {x : model_dict[x] for x in sorted(model_dict.keys(),key=lambda y: model_dict[y].name.lower())}
def starter_models(self)->Set[str]:
models = set()
for key, value in self.datasets.items():
name,base,model_type = ModelManager.parse_key(key)
if model_type==ModelType.Pipeline:
models.add(key)
return models
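A short usage sketch of the new ModelInstall API defined above; the printed layout is illustrative only:
config = InvokeAIAppConfig.get_config()
installer = ModelInstall(config)
for key, info in installer.all_models().items():
    # keys look like 'sd-1/pipeline/stable-diffusion-v1-5'
    flag = '*' if info.installed else ' '
    print(f"[{flag}] {key:60s} {info.description or ''}")
starters = installer.starter_models()   # set of pipeline keys from INITIAL_MODELS.yaml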
def default_config_file():
return config.model_conf_path
@@ -85,6 +142,15 @@ def initial_models():
return Datasets
return (Datasets := OmegaConf.load(Dataset_path)['diffusers'])
def add_models(model_manager, config_file_path: Path, models: List[tuple[str,str,str]]):
print(f'Installing {models}')
def del_models(model_manager, config_file_path: Path, models: List[tuple[str,str,str]]):
for base, model_type, name in models:
logger.info(f"Deleting {name}...")
model_manager.del_model(name, base, model_type)
model_manager.commit(config_file_path)
def install_requested_models(
diffusers: ModelInstallList = None,
controlnet: ModelInstallList = None,
@@ -95,9 +161,8 @@ def install_requested_models(
external_models: List[str] = None,
scan_at_startup: bool = False,
precision: str = "float16",
purge_deleted: bool = False,
config_file_path: Path = None,
model_config_file_callback: Callable[[Path],Path] = None
model_config_file_callback: Callable[[Path],Path] = None,
):
"""
Entry point for installing/deleting starter models, or installing external models.
@@ -110,40 +175,27 @@ def install_requested_models(
# prevent circular import here
from ..model_management import ModelManager
model_manager = ModelManager(OmegaConf.load(config_file_path), precision=precision)
if controlnet:
model_manager.install_controlnet_models(controlnet.install_models, access_token=access_token)
model_manager.delete_controlnet_models(controlnet.remove_models)
if lora:
model_manager.install_lora_models(lora.install_models, access_token=access_token)
model_manager.delete_lora_models(lora.remove_models)
for x in [controlnet, lora, ti, diffusers]:
if x:
add_models(model_manager, config_file_path, x.install_models)
del_models(model_manager, config_file_path, x.remove_models)
# if diffusers:
if ti:
model_manager.install_ti_models(ti.install_models, access_token=access_token)
model_manager.delete_ti_models(ti.remove_models)
if diffusers:
# TODO: Replace next three paragraphs with calls into new model manager
if diffusers.remove_models and len(diffusers.remove_models) > 0:
logger.info("Processing requested deletions")
for model in diffusers.remove_models:
logger.info(f"{model}...")
model_manager.del_model(model, delete_files=purge_deleted)
model_manager.commit(config_file_path)
if diffusers.install_models and len(diffusers.install_models) > 0:
logger.info("Installing requested models")
downloaded_paths = download_weight_datasets(
models=diffusers.install_models,
access_token=None,
precision=precision,
)
successful = {x:v for x,v in downloaded_paths.items() if v is not None}
if len(successful) > 0:
update_config_file(successful, config_file_path)
if len(successful) < len(diffusers.install_models):
unsuccessful = [x for x in downloaded_paths if downloaded_paths[x] is None]
logger.warning(f"Some of the model downloads were not successful: {unsuccessful}")
# if diffusers.install_models and len(diffusers.install_models) > 0:
# logger.info("Installing requested models")
# downloaded_paths = download_weight_datasets(
# models=diffusers.install_models,
# access_token=None,
# precision=precision,
# )
# successful = {x:v for x,v in downloaded_paths.items() if v is not None}
# if len(successful) > 0:
# update_config_file(successful, config_file_path)
# if len(successful) < len(diffusers.install_models):
# unsuccessful = [x for x in downloaded_paths if downloaded_paths[x] is None]
# logger.warning(f"Some of the model downloads were not successful: {unsuccessful}")
# due to above, we have to reload the model manager because conf file
# was changed behind its back
@@ -156,8 +208,8 @@ def install_requested_models(
if len(external_models) > 0:
logger.info("INSTALLING EXTERNAL MODELS")
for path_url_or_repo in external_models:
logger.debug(path_url_or_repo)
try:
logger.debug(f'In install_requested_models; callback = {model_config_file_callback}')
model_manager.heuristic_import(
path_url_or_repo,
commit_to_conf=config_file_path,
@@ -280,21 +332,18 @@ def _download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path:
# ---------------------------------------------
def download_from_hf(
model_class: object, model_name: str, **kwargs
model_class: object, model_name: str, destination: Path, **kwargs
):
logger = InvokeAILogger.getLogger('InvokeAI')
logger.addFilter(lambda x: 'fp16 is not a valid' not in x.getMessage())
path = config.cache_dir
model = model_class.from_pretrained(
model_name,
cache_dir=path,
resume_download=True,
**kwargs,
)
model_name = "--".join(("models", *model_name.split("/")))
return path / model_name if model else None
model.save_pretrained(destination, safe_serialization=True)
return destination
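With the new signature the caller chooses where the weights land; a sketch matching the call sites in invokeai_configure above (target_dir assumed):
download_from_hf(
    CLIPTokenizer,
    'openai/clip-vit-large-patch14',
    target_dir / 'clip-vit-large-patch14',   # destination is now required
)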
def _download_diffusion_weights(
mconfig: DictConfig, access_token: str, precision: str = "float32"

View File

@@ -305,7 +305,8 @@ class ModelManager(object):
) -> str:
return f"{base_model}/{model_type}/{model_name}"
def parse_key(self, model_key: str) -> Tuple[str, BaseModelType, ModelType]:
@classmethod
def parse_key(cls, model_key: str) -> Tuple[str, BaseModelType, ModelType]:
base_model_str, model_type_str, model_name = model_key.split('/', 2)
try:
model_type = ModelType(model_type_str)
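Assuming the enums are str-valued as shown in INITIAL_MODELS.yaml ('sd-1', 'pipeline', ...), keys round-trip through the now-classmethod parse_key:
key = ModelManager.create_key('stable-diffusion-v1-5',
                              BaseModelType.StableDiffusion1,   # 'sd-1'
                              ModelType.Pipeline)               # 'pipeline'
assert key == 'sd-1/pipeline/stable-diffusion-v1-5'
name, base, model_type = ModelManager.parse_key(key)
assert name == 'stable-diffusion-v1-5'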
@@ -548,7 +549,7 @@ class ModelManager(object):
line = f'{model_info["name"]:25s} {model_info["type"]:10s} {model_info["description"]}'
print(line)
# TODO: test when ui implemented
# Tested - LS
def del_model(
self,
model_name: str,
@@ -558,7 +559,6 @@ class ModelManager(object):
"""
Delete the named model.
"""
raise Exception("TODO: del_model") # TODO: redo
model_key = self.create_key(model_name, base_model, model_type)
model_cfg = self.models.pop(model_key, None)
@@ -574,10 +574,11 @@ class ModelManager(object):
self.cache.uncache_model(cache_id)
# if model inside invoke models folder - delete files
if model_cfg.path.startswith("models/") or model_cfg.path.startswith("models\\"):
model_path = self.globals.root_dir / model_cfg.path
if model_path.isdir():
shutil.rmtree(str(model_path))
model_path = self.globals.root_path / model_cfg.path
if model_path.is_relative_to(self.globals.models_path):
if model_path.is_dir():
rmtree(str(model_path))
else:
model_path.unlink()
@@ -712,5 +713,5 @@ class ModelManager(object):
self.models[model_key] = model_config
new_models_found = True
if new_models_found:
if new_models_found and self.config_path:
self.commit()

View File

@@ -1,3 +1,4 @@
import os
import torch
from typing import Optional, Union, Literal
from .base import (

View File

@@ -1,107 +1,92 @@
# This file predefines a few models that the user may want to install.
diffusers:
stable-diffusion-1.5:
description: Stable Diffusion version 1.5 diffusers model (4.27 GB)
repo_id: runwayml/stable-diffusion-v1-5
format: diffusers
vae:
repo_id: stabilityai/sd-vae-ft-mse
recommended: True
default: True
sd-inpainting-1.5:
description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)
repo_id: runwayml/stable-diffusion-inpainting
format: diffusers
vae:
repo_id: stabilityai/sd-vae-ft-mse
recommended: True
stable-diffusion-2.1:
description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)
repo_id: stabilityai/stable-diffusion-2-1
format: diffusers
recommended: True
sd-inpainting-2.0:
description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
repo_id: stabilityai/stable-diffusion-2-inpainting
format: diffusers
recommended: False
analog-diffusion-1.0:
description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)
repo_id: wavymulder/Analog-Diffusion
format: diffusers
recommended: false
deliberate-1.0:
description: Versatile model that produces detailed images up to 768px (4.27 GB)
format: diffusers
repo_id: XpucT/Deliberate
recommended: False
d&d-diffusion-1.0:
description: Dungeons & Dragons characters (2.13 GB)
format: diffusers
repo_id: 0xJustin/Dungeons-and-Diffusion
recommended: False
dreamlike-photoreal-2.0:
description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)
format: diffusers
repo_id: dreamlike-art/dreamlike-photoreal-2.0
recommended: False
inkpunk-1.0:
description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)
format: diffusers
repo_id: Envvi/Inkpunk-Diffusion
recommended: False
openjourney-4.0:
description: An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)
format: diffusers
repo_id: prompthero/openjourney
vae:
repo_id: stabilityai/sd-vae-ft-mse
recommended: False
portrait-plus-1.0:
description: An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB)
format: diffusers
repo_id: wavymulder/portraitplus
recommended: False
seek-art-mega-1.0:
description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)
repo_id: coreco/seek.art_MEGA
format: diffusers
vae:
repo_id: stabilityai/sd-vae-ft-mse
recommended: False
trinart-2.0:
description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)
repo_id: naclbit/trinart_stable_diffusion_v2
format: diffusers
vae:
repo_id: stabilityai/sd-vae-ft-mse
recommended: False
waifu-diffusion-1.4:
description: An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)
repo_id: hakurei/waifu-diffusion
format: diffusers
vae:
repo_id: stabilityai/sd-vae-ft-mse
recommended: False
controlnet:
canny: lllyasviel/control_v11p_sd15_canny
inpaint: lllyasviel/control_v11p_sd15_inpaint
mlsd: lllyasviel/control_v11p_sd15_mlsd
depth: lllyasviel/control_v11f1p_sd15_depth
normal_bae: lllyasviel/control_v11p_sd15_normalbae
seg: lllyasviel/control_v11p_sd15_seg
lineart: lllyasviel/control_v11p_sd15_lineart
lineart_anime: lllyasviel/control_v11p_sd15s2_lineart_anime
scribble: lllyasviel/control_v11p_sd15_scribble
softedge: lllyasviel/control_v11p_sd15_softedge
shuffle: lllyasviel/control_v11e_sd15_shuffle
tile: lllyasviel/control_v11f1e_sd15_tile
ip2p: lllyasviel/control_v11e_sd15_ip2p
textual_inversion:
'EasyNegative': https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors
'ahx-beta-453407d': sd-concepts-library/ahx-beta-453407d
lora:
'LowRA': https://civitai.com/api/download/models/63006
'Ink scenery': https://civitai.com/api/download/models/83390
'sd-model-finetuned-lora-t4': sayakpaul/sd-model-finetuned-lora-t4
sd-1/pipeline/stable-diffusion-v1-5:
description: Stable Diffusion version 1.5 diffusers model (4.27 GB)
repo_id: runwayml/stable-diffusion-v1-5
recommended: True
sd-1/pipeline/stable-diffusion-inpainting:
description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)
repo_id: runwayml/stable-diffusion-inpainting
recommended: True
sd-2/pipeline/stable-diffusion-2-1:
description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)
repo_id: stabilityai/stable-diffusion-2-1
recommended: True
sd-2/pipeline/stable-diffusion-2-inpainting:
description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
repo_id: stabilityai/stable-diffusion-2-inpainting
recommended: False
sd-1/pipeline/Analog-Diffusion:
description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)
repo_id: wavymulder/Analog-Diffusion
recommended: false
sd-1/pipeline/Deliberate:
description: Versatile model that produces detailed images up to 768px (4.27 GB)
repo_id: XpucT/Deliberate
recommended: False
sd-1/pipeline/Dungeons-and-Diffusion:
description: Dungeons & Dragons characters (2.13 GB)
repo_id: 0xJustin/Dungeons-and-Diffusion
recommended: False
sd-1/pipeline/dreamlike-photoreal-2.0:
description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)
repo_id: dreamlike-art/dreamlike-photoreal-2.0
recommended: False
sd-1/pipeline/Inkpunk-Diffusion:
description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)
repo_id: Envvi/Inkpunk-Diffusion
recommended: False
sd-1/pipeline/openjourney:
description: An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)
repo_id: prompthero/openjourney
recommended: False
sd-1/pipeline/portraitplus:
description: An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB)
repo_id: wavymulder/portraitplus
recommended: False
sd-1/pipeline/seek.art_MEGA:
description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)
recommended: False
sd-1/pipeline/trinart_stable_diffusion_v2:
description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)
repo_id: naclbit/trinart_stable_diffusion_v2
recommended: False
sd-1/pipeline/waifu-diffusion:
description: An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)
repo_id: hakurei/waifu-diffusion
recommended: False
sd-1/controlnet/canny:
repo_id: lllyasviel/control_v11p_sd15_canny
sd-1/controlnet/inpaint:
repo_id: lllyasviel/control_v11p_sd15_inpaint
sd-1/controlnet/mlsd:
repo_id: lllyasviel/control_v11p_sd15_mlsd
sd-1/controlnet/depth:
repo_id: lllyasviel/control_v11f1p_sd15_depth
sd-1/controlnet/normal_bae:
repo_id: lllyasviel/control_v11p_sd15_normalbae
sd-1/controlnet/seg:
repo_id: lllyasviel/control_v11p_sd15_seg
sd-1/controlnet/lineart:
repo_id: lllyasviel/control_v11p_sd15_lineart
sd-1/controlnet/lineart_anime:
repo_id: lllyasviel/control_v11p_sd15s2_lineart_anime
sd-1/controlnet/scribble:
repo_id: lllyasviel/control_v11p_sd15_scribble
sd-1/controlnet/softedge:
repo_id: lllyasviel/control_v11p_sd15_softedge
sd-1/controlnet/shuffle:
repo_id: lllyasviel/control_v11e_sd15_shuffle
sd-1/controlnet/tile:
repo_id: lllyasviel/control_v11f1e_sd15_tile
sd-1/controlnet/ip2p:
repo_id: lllyasviel/control_v11e_sd15_ip2p
sd-1/embedding/EasyNegative:
path: https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors
sd-1/embedding/ahx-beta-453407d:
repo_id: sd-concepts-library/ahx-beta-453407d
sd-1/lora/LowRA:
path: https://civitai.com/api/download/models/63006
sd-1/lora/Ink Scenery:
path: https://civitai.com/api/download/models/83390
sd-1/lora/sd-model-finetuned-lora-t4:
repo_id: sayakpaul/sd-model-finetuned-lora-t4
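Every top-level key in the new schema is base/type/name, so a hand-added local entry would follow the same shape (hypothetical example):
sd-1/lora/my-local-lora:
  description: a LoRA installed from local disk (hypothetical)
  path: /home/me/loras/my-local-lora.safetensors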

View File

@@ -31,17 +31,17 @@ from omegaconf import OmegaConf
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.backend.install.model_install_backend import (
Dataset_path,
Dataset_path, # most of these should go!!
default_config_file,
default_dataset,
install_requested_models,
recommended_datasets,
ModelInstallList,
UserSelections,
ModelInstall
)
from invokeai.backend import ModelManager
from invokeai.backend.model_management import ModelManager, BaseModelType, ModelType
from invokeai.backend.util import choose_precision, choose_torch_device
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.frontend.install.widgets import (
CenteredTitleText,
MultiSelectColumns,
@@ -58,6 +58,7 @@ from invokeai.frontend.install.widgets import (
from invokeai.app.services.config import InvokeAIAppConfig
config = InvokeAIAppConfig.get_config()
logger = InvokeAILogger.getLogger()
# build a table mapping all non-printable characters to None
# for stripping control characters
@@ -90,25 +91,10 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
if not config.model_conf_path.exists():
with open(config.model_conf_path,'w') as file:
print('# InvokeAI model configuration file',file=file)
model_manager = ModelManager(config.model_conf_path)
self.starter_models = OmegaConf.load(Dataset_path)['diffusers']
self.installed_diffusers_models = self.list_additional_diffusers_models(
model_manager,
self.starter_models,
)
self.installed_cn_models = model_manager.list_controlnet_models()
self.installed_lora_models = model_manager.list_lora_models()
self.installed_ti_models = model_manager.list_ti_models()
try:
self.existing_models = OmegaConf.load(default_config_file())
except:
self.existing_models = dict()
self.starter_model_list = list(self.starter_models.keys())
self.installed_models = dict()
self.installer = ModelInstall(config)
self.all_models = self.installer.all_models()
self.starter_models = self.installer.starter_models()
self.model_labels = self._get_model_labels()
window_width, window_height = get_terminal_size()
self.nextrely -= 1
@@ -141,39 +127,36 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
scroll_exit = True,
)
self.tabs.on_changed = self._toggle_tables
top_of_table = self.nextrely
self.starter_diffusers_models = self.add_starter_diffusers()
self.starter_pipelines = self.add_starter_pipelines()
bottom_of_table = self.nextrely
self.nextrely = top_of_table
self.diffusers_models = self.add_diffusers_widgets(
predefined_models=self.installed_diffusers_models,
model_type='Diffusers',
self.pipeline_models = self.add_model_widgets(
model_type=ModelType.Pipeline,
window_width=window_width,
exclude = self.starter_models
)
bottom_of_table = max(bottom_of_table,self.nextrely)
self.nextrely = top_of_table
self.controlnet_models = self.add_model_widgets(
predefined_models=self.installed_cn_models,
model_type='ControlNet',
model_type=ModelType.ControlNet,
window_width=window_width,
)
bottom_of_table = max(bottom_of_table,self.nextrely)
self.nextrely = top_of_table
self.lora_models = self.add_model_widgets(
predefined_models=self.installed_lora_models,
model_type="LoRA/LyCORIS",
model_type=ModelType.Lora,
window_width=window_width,
)
bottom_of_table = max(bottom_of_table,self.nextrely)
self.nextrely = top_of_table
self.ti_models = self.add_model_widgets(
predefined_models=self.installed_ti_models,
model_type="Textual Inversion Embeddings",
model_type=ModelType.TextualInversion,
window_width=window_width,
)
bottom_of_table = max(bottom_of_table,self.nextrely)
@@ -220,18 +203,20 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
self._toggle_tables([self.current_tab])
############# diffusers tab ##########
def add_starter_diffusers(self)->dict[str, npyscreen.widget]:
def add_starter_pipelines(self)->dict[str, npyscreen.widget]:
'''Add widgets responsible for selecting diffusers models'''
widgets = dict()
starter_model_labels = self._get_starter_model_labels()
recommended_models = [
models = self.all_models
starters = self.starter_models
starter_model_labels = self.model_labels
recommended_models = set([
x
for x in self.starter_model_list
if self.starter_models[x].get("recommended", False)
]
for x in starters
if models[x].recommended
])
self.installed_models = sorted(
[x for x in list(self.starter_models.keys()) if x in self.existing_models]
[x for x in starters if models[x].installed]
)
widgets.update(
@@ -246,55 +231,46 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
self.nextrely -= 1
# if user has already installed some initial models, then don't patronize them
# by showing more recommendations
show_recommended = not self.existing_models
show_recommended = len(self.installed_models)==0
keys = [x for x in models.keys() if x in starters]
widgets.update(
models_selected = self.add_widget_intelligent(
MultiSelectColumns,
columns=1,
name="Install Starter Models",
values=starter_model_labels,
values=[starter_model_labels[x] for x in keys],
value=[
self.starter_model_list.index(x)
for x in self.starter_model_list
if (show_recommended and x in recommended_models)\
or (x in self.existing_models)
keys.index(x)
for x in keys
if (show_recommended and models[x].recommended) \
or (x in self.installed_models)
],
max_height=len(starter_model_labels) + 1,
max_height=len(starters) + 1,
relx=4,
scroll_exit=True,
)
),
models = keys,
)
widgets.update(
purge_deleted = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Purge unchecked diffusers models from disk",
value=False,
scroll_exit=True,
relx=4,
)
)
widgets['purge_deleted'].when_value_edited = lambda: self.sync_purge_buttons(widgets['purge_deleted'])
self.nextrely += 1
return widgets
############# Add a set of model install widgets ########
def add_model_widgets(self,
predefined_models: dict[str,bool],
model_type: str,
model_type: ModelType,
window_width: int=120,
install_prompt: str=None,
add_purge_deleted: bool=False,
exclude: set=set(),
)->dict[str,npyscreen.widget]:
'''Generic code to create model selection widgets'''
widgets = dict()
model_list = sorted(predefined_models.keys())
model_list = [x for x in self.all_models if self.all_models[x].model_type==model_type and not x in exclude]
model_labels = [self.model_labels[x] for x in model_list]
if len(model_list) > 0:
max_width = max([len(x) for x in model_list])
max_width = max([len(x) for x in model_labels])
columns = window_width // (max_width+8) # 8 characters for "[x] " and padding
columns = min(len(model_list),columns) or 1
prompt = install_prompt or f"Select the desired {model_type} models to install. Unchecked models will be purged from disk."
prompt = install_prompt or f"Select the desired {model_type.value.title()} models to install. Unchecked models will be purged from disk."
widgets.update(
label1 = self.add_widget_intelligent(
@@ -310,31 +286,19 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
MultiSelectColumns,
columns=columns,
name=f"Install {model_type} Models",
values=model_list,
values=model_labels,
value=[
model_list.index(x)
for x in model_list
if predefined_models[x]
if self.all_models[x].installed
],
max_height=len(model_list)//columns + 1,
relx=4,
scroll_exit=True,
)
),
models = model_list,
)
if add_purge_deleted:
self.nextrely += 1
widgets.update(
purge_deleted = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Purge unchecked diffusers models from disk",
value=False,
scroll_exit=True,
relx=4,
)
)
widgets['purge_deleted'].when_value_edited = lambda: self.sync_purge_buttons(widgets['purge_deleted'])
self.nextrely += 1
widgets.update(
download_ids = self.add_widget_intelligent(
@@ -349,18 +313,15 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
### Tab for arbitrary diffusers widgets ###
def add_diffusers_widgets(self,
predefined_models: dict[str,bool],
model_type: str='Diffusers',
model_type: ModelType=ModelType.Pipeline,
window_width: int=120,
)->dict[str,npyscreen.widget]:
'''Similar to add_model_widgets() but adds some additional widgets at the bottom
to support the autoload directory'''
widgets = self.add_model_widgets(
predefined_models,
'Diffusers',
window_width,
install_prompt="Additional diffusers models already installed.",
add_purge_deleted=True
model_type = model_type,
window_width = window_width,
install_prompt=f"Additional {model_type.value.title()} models already installed.",
)
label = "Directory to scan for models to automatically import (<tab> autocompletes):"
@@ -390,21 +351,17 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
)
return widgets
def sync_purge_buttons(self,checkbox):
value = checkbox.value
self.starter_diffusers_models['purge_deleted'].value = value
self.diffusers_models['purge_deleted'].value = value
def resize(self):
super().resize()
if (s := self.starter_diffusers_models.get("models_selected")):
s.values = self._get_starter_model_labels()
if (s := self.starter_pipelines.get("models_selected")):
keys = [x for x in self.all_models.keys() if x in self.starter_models]
s.values = [self.model_labels[x] for x in keys]
def _toggle_tables(self, value=None):
selected_tab = value[0]
widgets = [
self.starter_diffusers_models,
self.diffusers_models,
self.starter_pipelines,
self.pipeline_models,
self.controlnet_models,
self.lora_models,
self.ti_models,
@@ -412,34 +369,38 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
for group in widgets:
for k,v in group.items():
v.hidden = True
v.editable = False
try:
v.hidden = True
v.editable = False
except:
pass
for k,v in widgets[selected_tab].items():
v.hidden = False
if not isinstance(v,(npyscreen.FixedText, npyscreen.TitleFixedText, CenteredTitleText)):
v.editable = True
try:
v.hidden = False
if not isinstance(v,(npyscreen.FixedText, npyscreen.TitleFixedText, CenteredTitleText)):
v.editable = True
except:
pass
self.__class__.current_tab = selected_tab # for persistence
self.display()
def _get_starter_model_labels(self) -> List[str]:
def _get_model_labels(self) -> dict[str,str]:
window_width, window_height = get_terminal_size()
label_width = 25
checkbox_width = 4
spacing_width = 2
models = self.all_models
label_width = max([len(models[x].name) for x in models])
description_width = window_width - label_width - checkbox_width - spacing_width
im = self.starter_models
names = self.starter_model_list
descriptions = [
im[x].description[0 : description_width - 3] + "..."
if len(im[x].description) > description_width
else im[x].description
for x in names
]
return [
f"%-{label_width}s %s" % (names[x], descriptions[x])
for x in range(0, len(names))
]
result = dict()
for x in models.keys():
description = models[x].description
description = description[0 : description_width - 3] + "..." \
if description and len(description) > description_width \
else description if description else ''
result[x] = f"%-{label_width}s %s" % (models[x].name, description)
return result
def _get_columns(self) -> int:
window_width, window_height = get_terminal_size()
@@ -548,8 +509,8 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
# rebuild the form, saving and restoring some of the fields that need to be preserved.
saved_messages = self.monitor.entry_widget.values
autoload_dir = self.diffusers_models['autoload_directory'].value
autoscan = self.diffusers_models['autoscan_on_startup'].value
autoload_dir = self.pipeline_models['autoload_directory'].value
autoscan = self.pipeline_models['autoscan_on_startup'].value
app.main_form = app.addForm(
"MAIN", addModelsForm, name="Install Stable Diffusion Models", multipage=self.multipage,
@@ -558,23 +519,8 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
app.main_form.monitor.entry_widget.values = saved_messages
app.main_form.monitor.entry_widget.buffer([''],scroll_end=True)
app.main_form.diffusers_models['autoload_directory'].value = autoload_dir
app.main_form.diffusers_models['autoscan_on_startup'].value = autoscan
###############################################################
def list_additional_diffusers_models(self,
manager: ModelManager,
starters:dict
)->dict[str,bool]:
'''Return a dict of all the currently installed models that are not on the starter list'''
model_info = manager.list_models()
additional_models = {
x:True for x in model_info \
if model_info[x]['format']=='diffusers' \
and x not in starters
}
return additional_models
app.main_form.pipeline_models['autoload_directory'].value = autoload_dir
app.main_form.pipeline_models['autoscan_on_startup'].value = autoscan
def marshall_arguments(self):
"""
@@ -591,24 +537,20 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
selections = self.parentApp.user_selections
# Starter models to install/remove
starter_models = dict(
map(
lambda x: (self.starter_model_list[x], True),
self.starter_diffusers_models['models_selected'].value,
)
)
selections.purge_deleted_models = self.starter_diffusers_models['purge_deleted'].value or \
self.diffusers_models['purge_deleted'].value
selections.install_models = [x for x in starter_models if x not in self.existing_models]
selections.remove_models = [x for x in self.starter_model_list if x in self.existing_models and x not in starter_models]
# TO DO - turn these into a dict so we don't have to hard-code the attributes
print(f'installed={[x for x in self.all_models if self.all_models[x].installed]}',file=f)
for section in [self.starter_pipelines, self.pipeline_models,
self.controlnet_models, self.lora_models, self.ti_models]:
selected = set([section['models'][x] for x in section['models_selected'].value])
models_to_install = [x for x in selected if not self.all_models[x].installed]
models_to_remove = [x for x in section['models'] if x not in selected and self.all_models[x].installed]
# "More" models
selections.import_model_paths = self.diffusers_models['download_ids'].value.split()
if diffusers_selected := self.diffusers_models.get('models_selected'):
selections.import_model_paths = self.pipeline_models['download_ids'].value.split()
if diffusers_selected := self.pipeline_models.get('models_selected'):
selections.remove_models.extend([x
for x in diffusers_selected.values
if self.installed_diffusers_models[x]
if self.installed_pipeline_models[x]
and diffusers_selected.values.index(x) not in diffusers_selected.value
]
)
@@ -659,9 +601,8 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
selections.install_ti_models.extend(additional_tis)
# load directory and whether to scan on startup
selections.scan_directory = self.diffusers_models['autoload_directory'].value
selections.autoscan_on_startup = self.diffusers_models['autoscan_on_startup'].value
selections.scan_directory = self.pipeline_models['autoload_directory'].value
selections.autoscan_on_startup = self.pipeline_models['autoscan_on_startup'].value
class AddModelApplication(npyscreen.NPSAppManaged):
def __init__(self,opt):
@@ -761,19 +702,19 @@ def process_and_execute(opt: Namespace,
directory_to_scan = selections.scan_directory
scan_at_startup = selections.autoscan_on_startup
potential_models_to_install = selections.import_model_paths
name_map = selections.model_name_map
install_requested_models(
diffusers = ModelInstallList(models_to_install, models_to_remove),
controlnet = ModelInstallList(selections.install_cn_models, selections.remove_cn_models),
lora = ModelInstallList(selections.install_lora_models, selections.remove_lora_models),
ti = ModelInstallList(selections.install_ti_models, selections.remove_ti_models),
diffusers = ModelInstallList(models_to_install, [name_map[ModelType.Pipeline][x] for x in models_to_remove]),
controlnet = ModelInstallList(selections.install_cn_models, [name_map[ModelType.ControlNet][x] for x in selections.remove_cn_models]),
lora = ModelInstallList(selections.install_lora_models, [name_map[ModelType.Lora][x] for x in selections.remove_lora_models]),
ti = ModelInstallList(selections.install_ti_models, [name_map[ModelType.TextualInversion][x] for x in selections.remove_ti_models]),
scan_directory=Path(directory_to_scan) if directory_to_scan else None,
external_models=potential_models_to_install,
scan_at_startup=scan_at_startup,
precision="float32"
if opt.full_precision
else choose_precision(torch.device(choose_torch_device())),
purge_deleted=selections.purge_deleted_models,
config_file_path=Path(opt.config_file) if opt.config_file else config.model_conf_path,
model_config_file_callback = lambda x: ask_user_for_config_file(x,conn_out)
)
@@ -828,7 +769,6 @@ def select_and_download_models(opt: Namespace):
ti=ModelInstallList(**{action:opt.textual_inversions or []}),
lora=ModelInstallList(**{action:opt.loras or []}),
precision=precision,
purge_deleted=True,
model_config_file_callback=lambda x: ask_user_for_config_file(x),
)
elif opt.default_only:

View File

@@ -24,14 +24,32 @@ from transformers import (
)
import invokeai.backend.util.logging as logger
from invokeai.backend.model_management import ModelManager
from invokeai.backend.model_management.model_probe import (
ModelProbe, ModelType, BaseModelType
ModelProbe, ModelType, BaseModelType, SchedulerPredictionType, ModelVariantInfo
)
warnings.filterwarnings("ignore")
transformers.logging.set_verbosity_error()
diffusers.logging.set_verbosity_error()
model_names = set()
def unique_name(name,info)->str:
done = False
key = ModelManager.create_key(name,info.base_type,info.model_type)
unique_name = key
counter = 1
while not done:
if unique_name in model_names:
unique_name = f'{key}-{counter:0>2d}'
counter += 1
else:
done = True
model_names.add(unique_name)
name,_,_ = ModelManager.parse_key(unique_name)
return name
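A worked example of the de-duplication above: repeated migrations that probe to the same base/type/name receive numbered suffixes (info is a hypothetical probe result):
unique_name('my-model', info)   # -> 'my-model'
unique_name('my-model', info)   # -> 'my-model-01'
unique_name('my-model', info)   # -> 'my-model-02'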
def create_directory_structure(dest: Path):
for model_base in [BaseModelType.StableDiffusion1,BaseModelType.StableDiffusion2]:
for model_type in [ModelType.Pipeline, ModelType.Vae, ModelType.Lora,
@@ -113,10 +131,10 @@ def migrate_conversion_models(dest_directory: Path):
# sd-1
repo_id = 'openai/clip-vit-large-patch14'
pipeline = CLIPTokenizer.from_pretrained(repo_id, **kwargs)
pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14', safe_serialization=True)
pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14' / 'tokenizer', safe_serialization=True)
pipeline = CLIPTextModel.from_pretrained(repo_id, **kwargs)
pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14', safe_serialization=True)
pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14' / 'text_encoder', safe_serialization=True)
# sd-2
repo_id = "stabilityai/stable-diffusion-2"
@@ -153,12 +171,48 @@ def migrate_tuning_models(dest: Path):
logger.info(f'Scanning {subdir}')
migrate_models(src, dest)
def write_yaml(model_name: str, path:Path, info:ModelVariantInfo, dest_yaml: io.TextIOBase):
name = unique_name(model_name, info)
stanza = {
f'{info.base_type.value}/{info.model_type.value}/{name}': {
'name': model_name,
'path': str(path),
'description': f'diffusers model {model_name}',
'format': 'diffusers',
'image_size': info.image_size,
'base': info.base_type.value,
'variant': info.variant_type.value,
'prediction_type': info.prediction_type.value,
'upcast_attention': info.prediction_type == SchedulerPredictionType.VPrediction
}
}
dest_yaml.write(yaml.dump(stanza))
dest_yaml.flush()
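For a hypothetical sd-1, epsilon-prediction model the emitted stanza would look roughly like this (yaml.dump sorts the inner keys alphabetically):
sd-1/pipeline/my-model:
  base: sd-1
  description: diffusers model my-model
  format: diffusers
  image_size: 512
  name: my-model
  path: models/sd-1/pipeline/my-model
  prediction_type: epsilon
  upcast_attention: false
  variant: normal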
def migrate_converted(dest_dir: Path, dest_yaml: io.TextIOBase):
for sub_dir in [Path('./models/converted_ckpts'),Path('./models/optimize-ckpts')]:
for model in sub_dir.glob('*'):
if not model.is_dir():
continue
info = ModelProbe().heuristic_probe(model)
if not info:
continue
dest = Path(dest_dir, info.base_type.value, info.model_type.value, model.name)
try:
copy_dir(model,dest)
rel_path = Path('models',dest.relative_to(dest_dir))
write_yaml(model.name,path=rel_path,info=info, dest_yaml=dest_yaml)
except KeyboardInterrupt:
raise
except Exception as e:
logger.warning(f'Could not migrate the converted diffusers {model.name}: {str(e)}. Skipping.')
def migrate_pipelines(dest_dir: Path, dest_yaml: io.TextIOBase):
cache = Path('./models/hub')
kwargs = dict(
cache_dir = cache,
local_files_only = True,
safety_checker = None,
# local_files_only = True,
)
for model in cache.glob('models--*'):
if len(list(model.glob('snapshots/**/model_index.json')))==0:
@@ -166,38 +220,26 @@ def migrate_pipelines(dest_dir: Path, dest_yaml: io.TextIOBase):
_,owner,repo_name=model.name.split('--')
repo_id = f'{owner}/{repo_name}'
revisions = [x.name for x in model.glob('refs/*')]
for revision in revisions:
logger.info(f'Migrating {repo_id}, revision {revision}')
try:
pipeline = StableDiffusionPipeline.from_pretrained(
repo_id,
revision=revision,
**kwargs)
info = ModelProbe().heuristic_probe(pipeline)
if not info:
continue
dest = Path(dest_dir, info.base_type.value, info.model_type.value, f'{repo_name}-{revision}')
pipeline.save_pretrained(dest, safe_serialization=True)
rel_path = Path('models',dest.relative_to(dest_dir))
stanza = {
f'{info.base_type.value}/{info.model_type.value}/{repo_name}-{revision}':
{
'name': repo_name,
'path': str(rel_path),
'description': f'diffusers model {repo_id}',
'format': 'diffusers',
'image_size': info.image_size,
'base': info.base_type.value,
'variant': info.variant_type.value,
'prediction_type': info.prediction_type.value,
}
}
print(yaml.dump(stanza),file=dest_yaml,end="")
dest_yaml.flush()
except KeyboardInterrupt:
raise
except Exception as e:
logger.warning(f'Could not load the "{revision}" version of {repo_id}. Skipping.')
# if an fp16 is available we use that
revision = 'fp16' if len(revisions) > 1 and 'fp16' in revisions else revisions[0]
logger.info(f'Migrating {repo_id}, revision {revision}')
try:
pipeline = StableDiffusionPipeline.from_pretrained(
repo_id,
revision=revision,
**kwargs)
info = ModelProbe().heuristic_probe(pipeline)
if not info:
continue
dest = Path(dest_dir, info.base_type.value, info.model_type.value, f'{repo_name}')
pipeline.save_pretrained(dest, safe_serialization=True)
rel_path = Path('models',dest.relative_to(dest_dir))
write_yaml(repo_name, path=rel_path, info=info, dest_yaml=dest_yaml)
except KeyboardInterrupt:
raise
except Exception as e:
logger.warning(f'Could not load the "{revision}" version of {repo_id}. Skipping.')
def migrate_checkpoints(dest_dir: Path, dest_yaml: io.TextIOBase):
# find any checkpoints referred to in old models.yaml
@@ -218,6 +260,7 @@ def migrate_checkpoints(dest_dir: Path, dest_yaml: io.TextIOBase):
dest = Path(dest_dir, info.base_type.value, info.model_type.value,weights.name)
copy_file(weights,dest)
weights = Path('models', info.base_type.value, info.model_type.value,weights.name)
model_name = unique_name(model_name, info)
stanza = {
f'{info.base_type.value}/{info.model_type.value}/{model_name}':
{
@@ -261,15 +304,16 @@ def main():
os.chdir(root_directory)
with open(dest_yaml,'w') as yaml_file:
print(yaml.dump({'__metadata__':
{'version':'3.0.0'}
}
),file=yaml_file,end=""
)
yaml_file.write(yaml.dump({'__metadata__':
{'version':'3.0.0'}
}
)
)
create_directory_structure(dest_directory)
migrate_support_models(dest_directory)
migrate_conversion_models(dest_directory)
migrate_tuning_models(dest_directory)
migrate_converted(dest_directory,yaml_file)
migrate_pipelines(dest_directory,yaml_file)
migrate_checkpoints(dest_directory,yaml_file)