rewrite of widget display - marshalling needs rewrite

Lincoln Stein
2023-06-15 23:32:33 -04:00
parent 5c740452f6
commit ada7399753
7 changed files with 473 additions and 464 deletions

View File

@@ -16,6 +16,7 @@ import shutil
 import textwrap
 import traceback
 import warnings
+import yaml
 from argparse import Namespace
 from pathlib import Path
 from shutil import get_terminal_size
@@ -25,6 +26,7 @@ from urllib import request
 import npyscreen
 import transformers
 from diffusers import AutoencoderKL
+from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 from huggingface_hub import HfFolder
 from huggingface_hub import login as hf_hub_login
 from omegaconf import OmegaConf
@@ -34,6 +36,8 @@ from transformers import (
     CLIPSegForImageSegmentation,
     CLIPTextModel,
     CLIPTokenizer,
+    AutoFeatureExtractor,
+    BertTokenizerFast,
 )

 import invokeai.configs as configs
@@ -58,6 +62,9 @@ from invokeai.backend.install.model_install_backend import (
     recommended_datasets,
     UserSelections,
 )
+from invokeai.backend.model_management.model_probe import (
+    ModelProbe, ModelType, BaseModelType, SchedulerPredictionType
+)

 warnings.filterwarnings("ignore")
 transformers.logging.set_verbosity_error()
@ -81,7 +88,7 @@ INIT_FILE_PREAMBLE = """# InvokeAI initialization file
# or renaming it and then running invokeai-configure again. # or renaming it and then running invokeai-configure again.
""" """
logger=None logger=InvokeAILogger.getLogger()
# -------------------------------------------- # --------------------------------------------
def postscript(errors: None): def postscript(errors: None):
@@ -162,75 +169,91 @@ class ProgressBar:
 # ---------------------------------------------
 def download_with_progress_bar(model_url: str, model_dest: str, label: str = "the"):
     try:
-        print(f"Installing {label} model file {model_url}...", end="", file=sys.stderr)
+        logger.info(f"Installing {label} model file {model_url}...")
         if not os.path.exists(model_dest):
             os.makedirs(os.path.dirname(model_dest), exist_ok=True)
             request.urlretrieve(
                 model_url, model_dest, ProgressBar(os.path.basename(model_dest))
             )
-            print("...downloaded successfully", file=sys.stderr)
+            logger.info("...downloaded successfully")
         else:
-            print("...exists", file=sys.stderr)
+            logger.info("...exists")
     except Exception:
-        print("...download failed", file=sys.stderr)
-        print(f"Error downloading {label} model", file=sys.stderr)
+        logger.info("...download failed")
+        logger.info(f"Error downloading {label} model")
         print(traceback.format_exc(), file=sys.stderr)

-# ---------------------------------------------
-# this will preload the Bert tokenizer fles
-def download_bert():
-    print("Installing bert tokenizer...", file=sys.stderr)
-    with warnings.catch_warnings():
-        warnings.filterwarnings("ignore", category=DeprecationWarning)
-        from transformers import BertTokenizerFast
-        download_from_hf(BertTokenizerFast, "bert-base-uncased")
-
-# ---------------------------------------------
-def download_sd1_clip():
-    print("Installing SD1 clip model...", file=sys.stderr)
-    version = "openai/clip-vit-large-patch14"
-    download_from_hf(CLIPTokenizer, version)
-    download_from_hf(CLIPTextModel, version)
-
-# ---------------------------------------------
-def download_sd2_clip():
-    version = "stabilityai/stable-diffusion-2"
-    print("Installing SD2 clip model...", file=sys.stderr)
-    download_from_hf(CLIPTokenizer, version, subfolder="tokenizer")
-    download_from_hf(CLIPTextModel, version, subfolder="text_encoder")
+def download_conversion_models():
+    target_dir = config.root_path / 'models/core/convert'
+    kwargs = dict() # for future use
+    try:
+        logger.info('Downloading core tokenizers and text encoders')
+        # bert
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            bert = BertTokenizerFast.from_pretrained("bert-base-uncased", **kwargs)
+            bert.save_pretrained(target_dir / 'bert-base-uncased', safe_serialization=True)
+
+        # sd-1
+        repo_id = 'openai/clip-vit-large-patch14'
+        download_from_hf(CLIPTokenizer, repo_id, target_dir / 'clip-vit-large-patch14')
+        download_from_hf(CLIPTextModel, repo_id, target_dir / 'clip-vit-large-patch14')
+
+        # sd-2
+        repo_id = "stabilityai/stable-diffusion-2"
+        pipeline = CLIPTokenizer.from_pretrained(repo_id, subfolder="tokenizer", **kwargs)
+        pipeline.save_pretrained(target_dir / 'stable-diffusion-2-clip' / 'tokenizer', safe_serialization=True)
+        pipeline = CLIPTextModel.from_pretrained(repo_id, subfolder="text_encoder", **kwargs)
+        pipeline.save_pretrained(target_dir / 'stable-diffusion-2-clip' / 'text_encoder', safe_serialization=True)
+
+        # VAE
+        logger.info('Downloading stable diffusion VAE')
+        vae = AutoencoderKL.from_pretrained('stabilityai/sd-vae-ft-mse', **kwargs)
+        vae.save_pretrained(target_dir / 'sd-vae-ft-mse', safe_serialization=True)
+
+        # safety checking
+        logger.info('Downloading safety checker')
+        repo_id = "CompVis/stable-diffusion-safety-checker"
+        pipeline = AutoFeatureExtractor.from_pretrained(repo_id,**kwargs)
+        pipeline.save_pretrained(target_dir / 'stable-diffusion-safety-checker', safe_serialization=True)
+        pipeline = StableDiffusionSafetyChecker.from_pretrained(repo_id,**kwargs)
+        pipeline.save_pretrained(target_dir / 'stable-diffusion-safety-checker', safe_serialization=True)
+    except KeyboardInterrupt:
+        raise
+    except Exception as e:
+        logger.error(str(e))

 # ---------------------------------------------
 def download_realesrgan():
-    print("Installing models from RealESRGAN...", file=sys.stderr)
+    logger.info("Installing models from RealESRGAN...")
     model_url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth"
     wdn_model_url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth"
-    model_dest = config.root_path / "models/realesrgan/realesr-general-x4v3.pth"
-    wdn_model_dest = config.root_path / "models/realesrgan/realesr-general-wdn-x4v3.pth"
+    model_dest = config.root_path / "models/core/upscaling/realesrgan/realesr-general-x4v3.pth"
+    wdn_model_dest = config.root_path / "models/core/upscaling/realesrgan/realesr-general-wdn-x4v3.pth"
     download_with_progress_bar(model_url, str(model_dest), "RealESRGAN")
     download_with_progress_bar(wdn_model_url, str(wdn_model_dest), "RealESRGANwdn")

 def download_gfpgan():
-    print("Installing GFPGAN models...", file=sys.stderr)
+    logger.info("Installing GFPGAN models...")
     for model in (
         [
             "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth",
-            "./models/gfpgan/GFPGANv1.4.pth",
+            "./models/core/face_restoration/gfpgan/GFPGANv1.4.pth",
         ],
         [
             "https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth",
-            "./models/gfpgan/weights/detection_Resnet50_Final.pth",
+            "./models/core/face_restoration/gfpgan/weights/detection_Resnet50_Final.pth",
         ],
         [
             "https://github.com/xinntao/facexlib/releases/download/v0.2.2/parsing_parsenet.pth",
-            "./models/gfpgan/weights/parsing_parsenet.pth",
+            "./models/core/face_restoration/gfpgan/weights/parsing_parsenet.pth",
         ],
     ):
         model_url, model_dest = model[0], config.root_path / model[1]
@@ -239,70 +262,32 @@ def download_gfpgan():

 # ---------------------------------------------
 def download_codeformer():
-    print("Installing CodeFormer model file...", file=sys.stderr)
+    logger.info("Installing CodeFormer model file...")
     model_url = (
         "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth"
     )
-    model_dest = config.root_path / "models/codeformer/codeformer.pth"
+    model_dest = config.root_path / "models/core/face_restoration/codeformer/codeformer.pth"
     download_with_progress_bar(model_url, str(model_dest), "CodeFormer")

 # ---------------------------------------------
 def download_clipseg():
-    print("Installing clipseg model for text-based masking...", file=sys.stderr)
+    logger.info("Installing clipseg model for text-based masking...")
     CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
     try:
-        download_from_hf(AutoProcessor, CLIPSEG_MODEL)
-        download_from_hf(CLIPSegForImageSegmentation, CLIPSEG_MODEL)
+        download_from_hf(AutoProcessor, CLIPSEG_MODEL, config.root_path / 'models/core/misc/clipseg')
+        download_from_hf(CLIPSegForImageSegmentation, CLIPSEG_MODEL,'models/core/misc/clipseg')
     except Exception:
-        print("Error installing clipseg model:")
-        print(traceback.format_exc())
+        logger.info("Error installing clipseg model:")
+        logger.info(traceback.format_exc())

-# -------------------------------------
-def download_safety_checker():
-    print("Installing model for NSFW content detection...", file=sys.stderr)
-    try:
-        from diffusers.pipelines.stable_diffusion.safety_checker import (
-            StableDiffusionSafetyChecker,
-        )
-        from transformers import AutoFeatureExtractor
-    except ModuleNotFoundError:
-        print("Error installing NSFW checker model:")
-        print(traceback.format_exc())
-        return
-    safety_model_id = "CompVis/stable-diffusion-safety-checker"
-    print("AutoFeatureExtractor...", file=sys.stderr)
-    download_from_hf(AutoFeatureExtractor, safety_model_id)
-    print("StableDiffusionSafetyChecker...", file=sys.stderr)
-    download_from_hf(StableDiffusionSafetyChecker, safety_model_id)
-
-# -------------------------------------
-def download_vaes():
-    print("Installing stabilityai VAE...", file=sys.stderr)
-    try:
-        # first the diffusers version
-        repo_id = "stabilityai/sd-vae-ft-mse"
-        args = dict(
-            cache_dir=config.cache_dir,
-        )
-        if not AutoencoderKL.from_pretrained(repo_id, **args):
-            raise Exception(f"download of {repo_id} failed")
-        repo_id = "stabilityai/sd-vae-ft-mse-original"
-        model_name = "vae-ft-mse-840000-ema-pruned.ckpt"
-        # next the legacy checkpoint version
-        if not hf_download_with_resume(
-            repo_id=repo_id,
-            model_name=model_name,
-            model_dir=str(config.root_path / Model_dir / Weights_dir),
-        ):
-            raise Exception(f"download of {model_name} failed")
-    except Exception as e:
-        print(f"Error downloading StabilityAI standard VAE: {str(e)}", file=sys.stderr)
-        print(traceback.format_exc(), file=sys.stderr)
+def download_support_models():
+    download_realesrgan()
+    download_gfpgan()
+    download_codeformer()
+    download_clipseg()
+    download_conversion_models()

 # -------------------------------------
 def get_root(root: str = None) -> str:
@@ -657,17 +642,13 @@ def default_user_selections(program_opts: Namespace) -> UserSelections:

 # -------------------------------------
 def initialize_rootdir(root: Path, yes_to_all: bool = False):
-    print("** INITIALIZING INVOKEAI RUNTIME DIRECTORY **")
+    logger.info("** INITIALIZING INVOKEAI RUNTIME DIRECTORY **")
     for name in (
         "models",
-        "configs",
-        "embeddings",
         "databases",
-        "loras",
-        "controlnets",
         "text-inversion-output",
         "text-inversion-training-data",
+        "configs"
     ):
         os.makedirs(os.path.join(root, name), exist_ok=True)
@@ -676,6 +657,22 @@ def initialize_rootdir(root: Path, yes_to_all: bool = False):
     if not os.path.samefile(configs_src, configs_dest):
         shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)

+    dest = root / 'models'
+    for model_base in [BaseModelType.StableDiffusion1,BaseModelType.StableDiffusion2]:
+        for model_type in [ModelType.Pipeline, ModelType.Vae, ModelType.Lora,
+                           ModelType.ControlNet,ModelType.TextualInversion]:
+            path = dest / model_base.value / model_type.value
+            path.mkdir(parents=True, exist_ok=True)
+    path = dest / 'core'
+    path.mkdir(parents=True, exist_ok=True)
+
+    with open(root / 'configs' / 'models.yaml','w') as yaml_file:
+        yaml_file.write(yaml.dump({'__metadata__':
+                                   {'version':'3.0.0'}
+                                   }
+                                  )
+                        )

 # -------------------------------------
 def run_console_ui(
@@ -837,7 +834,7 @@ def main():
     old_init_file = config.root_path / 'invokeai.init'
     new_init_file = config.root_path / 'invokeai.yaml'
     if old_init_file.exists() and not new_init_file.exists():
-        print('** Migrating invokeai.init to invokeai.yaml')
+        logger.info('** Migrating invokeai.init to invokeai.yaml')
         migrate_init_file(old_init_file)
         # Load new init file into config
         config.parse_args(argv=[],conf=OmegaConf.load(new_init_file))
@@ -855,29 +852,21 @@ def main():
         if init_options:
             write_opts(init_options, new_init_file)
         else:
-            print(
+            logger.info(
                 '\n** CANCELLED AT USER\'S REQUEST. USE THE "invoke.sh" LAUNCHER TO RUN LATER **\n'
             )
             sys.exit(0)

         if opt.skip_support_models:
-            print("\n** SKIPPING SUPPORT MODEL DOWNLOADS PER USER REQUEST **")
+            logger.info("SKIPPING SUPPORT MODEL DOWNLOADS PER USER REQUEST")
         else:
-            print("\n** CHECKING/UPDATING SUPPORT MODELS **")
-            download_bert()
-            download_sd1_clip()
-            download_sd2_clip()
-            download_realesrgan()
-            download_gfpgan()
-            download_codeformer()
-            download_clipseg()
-            download_safety_checker()
-            download_vaes()
+            logger.info("CHECKING/UPDATING SUPPORT MODELS")
+            download_support_models()

         if opt.skip_sd_weights:
-            print("\n** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **")
+            logger.info("\n** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **")
         elif models_to_download:
-            print("\n** DOWNLOADING DIFFUSION WEIGHTS **")
+            logger.info("\n** DOWNLOADING DIFFUSION WEIGHTS **")
             process_and_execute(opt, models_to_download)

         postscript(errors=errors)
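Note: taken together, the hunks above funnel every support-model download through the single `download_support_models()` entry point, and `initialize_rootdir()` now seeds a version-3 `models.yaml` that carries nothing but metadata. A minimal sketch of that stub, with an OmegaConf read-back added purely for illustration (the read-back is an assumption, not code from this commit):

    # Sketch of the v3 models.yaml stub produced by initialize_rootdir().
    # Only the __metadata__ stanza comes from the diff above.
    import yaml
    from omegaconf import OmegaConf

    text = yaml.dump({'__metadata__': {'version': '3.0.0'}})
    print(text)      # -> "__metadata__:\n  version: 3.0.0\n"

    conf = OmegaConf.create(text)
    assert conf['__metadata__']['version'] == '3.0.0'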

View File

@@ -9,7 +9,7 @@ import warnings
 from dataclasses import dataclass,field
 from pathlib import Path
 from tempfile import TemporaryFile
-from typing import List, Dict, Callable
+from typing import List, Dict, Set, Callable

 import requests
 from diffusers import AutoencoderKL
@@ -20,8 +20,8 @@ from tqdm import tqdm

 import invokeai.configs as configs

 from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.backend.model_management import ModelManager, ModelType, BaseModelType
 from ..stable_diffusion import StableDiffusionGeneratorPipeline
 from ..util.logging import InvokeAILogger
@@ -62,7 +62,6 @@ class ModelInstallList:
 class UserSelections():
     install_models: List[str]= field(default_factory=list)
     remove_models: List[str]=field(default_factory=list)
-    purge_deleted_models: bool=field(default_factory=list)
     install_cn_models: List[str] = field(default_factory=list)
     remove_cn_models: List[str] = field(default_factory=list)
     install_lora_models: List[str] = field(default_factory=list)
@@ -72,6 +71,64 @@ class UserSelections():
     scan_directory: Path = None
     autoscan_on_startup: bool=False
     import_model_paths: str=None

+@dataclass
+class ModelLoadInfo():
+    name: str
+    model_type: ModelType
+    base_type: BaseModelType
+    path: Path = None
+    repo_id: str = None
+    description: str = ''
+    installed: bool = False
+    recommended: bool = False
+
+class ModelInstall(object):
+    def __init__(self,config:InvokeAIAppConfig):
+        self.config = config
+        self.mgr = ModelManager(config.model_conf_path)
+        self.datasets = OmegaConf.load(Dataset_path)
+
+    def all_models(self)->Dict[str,ModelLoadInfo]:
+        '''
+        Return dict of model_key=>ModelStatus
+        '''
+        model_dict = dict()
+
+        # first populate with the entries in INITIAL_MODELS.yaml
+        for key, value in self.datasets.items():
+            name,base,model_type = ModelManager.parse_key(key)
+            value['name'] = name
+            value['base_type'] = base
+            value['model_type'] = model_type
+            model_dict[key] = ModelLoadInfo(**value)
+
+        # supplement with entries in models.yaml
+        installed_models = self.mgr.list_models()
+        for base in installed_models.keys():
+            for model_type in installed_models[base].keys():
+                for name, value in installed_models[base][model_type].items():
+                    key = ModelManager.create_key(name, base, model_type)
+                    if key in model_dict:
+                        model_dict[key].installed = True
+                    else:
+                        model_dict[key] = ModelLoadInfo(
+                            name = name,
+                            base_type = base,
+                            model_type = model_type,
+                            description = value.get('description'),
+                            path = value.get('path'),
+                            installed = True,
+                        )
+        return {x : model_dict[x] for x in sorted(model_dict.keys(),key=lambda y: model_dict[y].name.lower())}
+
+    def starter_models(self)->Set[str]:
+        models = set()
+        for key, value in self.datasets.items():
+            name,base,model_type = ModelManager.parse_key(key)
+            if model_type==ModelType.Pipeline:
+                models.add(key)
+        return models

 def default_config_file():
     return config.model_conf_path
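`all_models()` merges the starter catalog with whatever `models.yaml` reports as installed, then sorts case-insensitively by display name. A toy sketch of just that merge-and-sort step (the records below are fabricated samples, not the real `ModelLoadInfo` class):

    # Keys already present from INITIAL_MODELS.yaml are merely flagged
    # installed; everything else would get a fresh record.
    from dataclasses import dataclass

    @dataclass
    class Info:
        name: str
        installed: bool = False

    model_dict = {
        'sd-1/pipeline/Deliberate': Info('Deliberate'),
        'sd-1/pipeline/stable-diffusion-v1-5': Info('stable-diffusion-v1-5'),
    }
    for key in ['sd-1/pipeline/Deliberate']:        # pretend models.yaml lists this
        model_dict[key].installed = True

    ordered = {k: model_dict[k]
               for k in sorted(model_dict, key=lambda y: model_dict[y].name.lower())}
    assert list(ordered)[0] == 'sd-1/pipeline/Deliberate'   # 'deliberate' sorts first
    assert ordered['sd-1/pipeline/Deliberate'].installed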
@@ -85,6 +142,15 @@ def initial_models():
         return Datasets
     return (Datasets := OmegaConf.load(Dataset_path)['diffusers'])

+def add_models(model_manager, config_file_path: Path, models: List[tuple[str,str,str]]):
+    print(f'Installing {models}')
+
+def del_models(model_manager, config_file_path: Path, models: List[tuple[str,str,str]]):
+    for base, model_type, name in models:
+        logger.info(f"Deleting {name}...")
+        model_manager.del_model(name, base, model_type)
+    model_manager.commit(config_file_path)
+
 def install_requested_models(
     diffusers: ModelInstallList = None,
     controlnet: ModelInstallList = None,
@@ -95,9 +161,8 @@ def install_requested_models(
     external_models: List[str] = None,
     scan_at_startup: bool = False,
     precision: str = "float16",
-    purge_deleted: bool = False,
     config_file_path: Path = None,
-    model_config_file_callback: Callable[[Path],Path] = None
+    model_config_file_callback: Callable[[Path],Path] = None,
 ):
     """
     Entry point for installing/deleting starter models, or installing external models.
@@ -110,40 +175,27 @@ def install_requested_models(
     # prevent circular import here
     from ..model_management import ModelManager
     model_manager = ModelManager(OmegaConf.load(config_file_path), precision=precision)
-    if controlnet:
-        model_manager.install_controlnet_models(controlnet.install_models, access_token=access_token)
-        model_manager.delete_controlnet_models(controlnet.remove_models)
-
-    if lora:
-        model_manager.install_lora_models(lora.install_models, access_token=access_token)
-        model_manager.delete_lora_models(lora.remove_models)
-
-    if ti:
-        model_manager.install_ti_models(ti.install_models, access_token=access_token)
-        model_manager.delete_ti_models(ti.remove_models)
-
-    if diffusers:
-        # TODO: Replace next three paragraphs with calls into new model manager
-        if diffusers.remove_models and len(diffusers.remove_models) > 0:
-            logger.info("Processing requested deletions")
-            for model in diffusers.remove_models:
-                logger.info(f"{model}...")
-                model_manager.del_model(model, delete_files=purge_deleted)
-            model_manager.commit(config_file_path)
-
-        if diffusers.install_models and len(diffusers.install_models) > 0:
-            logger.info("Installing requested models")
-            downloaded_paths = download_weight_datasets(
-                models=diffusers.install_models,
-                access_token=None,
-                precision=precision,
-            )
-            successful = {x:v for x,v in downloaded_paths.items() if v is not None}
-            if len(successful) > 0:
-                update_config_file(successful, config_file_path)
-            if len(successful) < len(diffusers.install_models):
-                unsuccessful = [x for x in downloaded_paths if downloaded_paths[x] is None]
-                logger.warning(f"Some of the model downloads were not successful: {unsuccessful}")
+
+    for x in [controlnet, lora, ti, diffusers]:
+        if x:
+            add_models(model_manager, config_file_path, x.install_models)
+            del_models(model_manager, config_file_path, x.remove_models)
+
+    # if diffusers:
+    #     if diffusers.install_models and len(diffusers.install_models) > 0:
+    #         logger.info("Installing requested models")
+    #         downloaded_paths = download_weight_datasets(
+    #             models=diffusers.install_models,
+    #             access_token=None,
+    #             precision=precision,
+    #         )
+    #         successful = {x:v for x,v in downloaded_paths.items() if v is not None}
+    #         if len(successful) > 0:
+    #             update_config_file(successful, config_file_path)
+    #         if len(successful) < len(diffusers.install_models):
+    #             unsuccessful = [x for x in downloaded_paths if downloaded_paths[x] is None]
+    #             logger.warning(f"Some of the model downloads were not successful: {unsuccessful}")

     # due to above, we have to reload the model manager because conf file
     # was changed behind its back
@@ -156,8 +208,8 @@ def install_requested_models(
     if len(external_models) > 0:
         logger.info("INSTALLING EXTERNAL MODELS")
         for path_url_or_repo in external_models:
+            logger.debug(path_url_or_repo)
             try:
-                logger.debug(f'In install_requested_models; callback = {model_config_file_callback}')
                 model_manager.heuristic_import(
                     path_url_or_repo,
                     commit_to_conf=config_file_path,
@@ -280,21 +332,18 @@ def _download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path:

 # ---------------------------------------------
 def download_from_hf(
-    model_class: object, model_name: str, **kwargs
+    model_class: object, model_name: str, destination: Path, **kwargs
 ):
     logger = InvokeAILogger.getLogger('InvokeAI')
     logger.addFilter(lambda x: 'fp16 is not a valid' not in x.getMessage())
-    path = config.cache_dir
     model = model_class.from_pretrained(
         model_name,
-        cache_dir=path,
         resume_download=True,
         **kwargs,
     )
-    model_name = "--".join(("models", *model_name.split("/")))
-    return path / model_name if model else None
+    model.save_pretrained(destination, safe_serialization=True)
+    return destination

 def _download_diffusion_weights(
     mconfig: DictConfig, access_token: str, precision: str = "float32"
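With the rework above, `download_from_hf()` stops returning a HuggingFace cache path and instead saves the model straight into an explicit destination with safetensors serialization. A hedged usage sketch (the repo id and target directory simply mirror `download_conversion_models()`; the inline form below is an assumption about what the call amounts to):

    # Sketch: equivalent inline form of download_from_hf(CLIPTokenizer,
    # 'openai/clip-vit-large-patch14', destination), which returns `destination`.
    from pathlib import Path
    from transformers import CLIPTokenizer

    destination = Path('models/core/convert/clip-vit-large-patch14')
    model = CLIPTokenizer.from_pretrained('openai/clip-vit-large-patch14',
                                          resume_download=True)
    model.save_pretrained(destination, safe_serialization=True)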

View File

@@ -305,7 +305,8 @@ class ModelManager(object):
     ) -> str:
         return f"{base_model}/{model_type}/{model_name}"

-    def parse_key(self, model_key: str) -> Tuple[str, BaseModelType, ModelType]:
+    @classmethod
+    def parse_key(cls, model_key: str) -> Tuple[str, BaseModelType, ModelType]:
         base_model_str, model_type_str, model_name = model_key.split('/', 2)
         try:
             model_type = ModelType(model_type_str)
@@ -548,7 +549,7 @@ class ModelManager(object):
             line = f'{model_info["name"]:25s} {model_info["type"]:10s} {model_info["description"]}'
             print(line)

-    # TODO: test when ui implemented
+    # Tested - LS
     def del_model(
         self,
         model_name: str,
@@ -558,7 +559,6 @@ class ModelManager(object):
         """
         Delete the named model.
         """
-        raise Exception("TODO: del_model") # TODO: redo
         model_key = self.create_key(model_name, base_model, model_type)
         model_cfg = self.models.pop(model_key, None)
@@ -574,10 +574,11 @@ class ModelManager(object):
             self.cache.uncache_model(cache_id)

         # if model inside invoke models folder - delete files
-        if model_cfg.path.startswith("models/") or model_cfg.path.startswith("models\\"):
-            model_path = self.globals.root_dir / model_cfg.path
-            if model_path.isdir():
-                shutil.rmtree(str(model_path))
+        model_path = self.globals.root_path / model_cfg.path
+        if model_path.is_relative_to(self.globals.models_path):
+            if model_path.is_dir():
+                rmtree(str(model_path))
             else:
                 model_path.unlink()
@@ -712,5 +713,5 @@ class ModelManager(object):
                 self.models[model_key] = model_config
                 new_models_found = True

-        if new_models_found:
+        if new_models_found and self.config_path:
             self.commit()
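Since `parse_key()` is now a classmethod, helpers such as `ModelInstall.all_models()` can split a model key without holding a `ModelManager` instance. A standalone sketch of the round trip implied by `create_key()`/`parse_key()` (plain strings stand in for the BaseModelType/ModelType enums the real methods use):

    # Round trip between create_key() and the new classmethod parse_key(),
    # re-implemented here with bare strings for illustration only.
    from typing import Tuple

    def create_key(model_name: str, base_model: str, model_type: str) -> str:
        return f"{base_model}/{model_type}/{model_name}"

    def parse_key(model_key: str) -> Tuple[str, str, str]:
        base_model, model_type, model_name = model_key.split('/', 2)
        return model_name, base_model, model_type

    key = create_key('stable-diffusion-v1-5', 'sd-1', 'pipeline')
    assert key == 'sd-1/pipeline/stable-diffusion-v1-5'
    assert parse_key(key) == ('stable-diffusion-v1-5', 'sd-1', 'pipeline')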

View File

@@ -1,3 +1,4 @@
+import os
 import torch
 from typing import Optional, Union, Literal
 from .base import (

View File

@@ -1,107 +1,92 @@
 # This file predefines a few models that the user may want to install.
-diffusers:
-  stable-diffusion-1.5:
-    description: Stable Diffusion version 1.5 diffusers model (4.27 GB)
-    repo_id: runwayml/stable-diffusion-v1-5
-    format: diffusers
-    vae:
-      repo_id: stabilityai/sd-vae-ft-mse
-    recommended: True
-    default: True
-  sd-inpainting-1.5:
-    description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)
-    repo_id: runwayml/stable-diffusion-inpainting
-    format: diffusers
-    vae:
-      repo_id: stabilityai/sd-vae-ft-mse
-    recommended: True
-  stable-diffusion-2.1:
-    description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)
-    repo_id: stabilityai/stable-diffusion-2-1
-    format: diffusers
-    recommended: True
-  sd-inpainting-2.0:
-    description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
-    repo_id: stabilityai/stable-diffusion-2-inpainting
-    format: diffusers
-    recommended: False
-  analog-diffusion-1.0:
-    description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)
-    repo_id: wavymulder/Analog-Diffusion
-    format: diffusers
-    recommended: false
-  deliberate-1.0:
-    description: Versatile model that produces detailed images up to 768px (4.27 GB)
-    format: diffusers
-    repo_id: XpucT/Deliberate
-    recommended: False
-  d&d-diffusion-1.0:
-    description: Dungeons & Dragons characters (2.13 GB)
-    format: diffusers
-    repo_id: 0xJustin/Dungeons-and-Diffusion
-    recommended: False
-  dreamlike-photoreal-2.0:
-    description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)
-    format: diffusers
-    repo_id: dreamlike-art/dreamlike-photoreal-2.0
-    recommended: False
-  inkpunk-1.0:
-    description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)
-    format: diffusers
-    repo_id: Envvi/Inkpunk-Diffusion
-    recommended: False
-  openjourney-4.0:
-    description: An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)
-    format: diffusers
-    repo_id: prompthero/openjourney
-    vae:
-      repo_id: stabilityai/sd-vae-ft-mse
-    recommended: False
-  portrait-plus-1.0:
-    description: An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB)
-    format: diffusers
-    repo_id: wavymulder/portraitplus
-    recommended: False
-  seek-art-mega-1.0:
-    description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)
-    repo_id: coreco/seek.art_MEGA
-    format: diffusers
-    vae:
-      repo_id: stabilityai/sd-vae-ft-mse
-    recommended: False
-  trinart-2.0:
-    description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)
-    repo_id: naclbit/trinart_stable_diffusion_v2
-    format: diffusers
-    vae:
-      repo_id: stabilityai/sd-vae-ft-mse
-    recommended: False
-  waifu-diffusion-1.4:
-    description: An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)
-    repo_id: hakurei/waifu-diffusion
-    format: diffusers
-    vae:
-      repo_id: stabilityai/sd-vae-ft-mse
-    recommended: False
-controlnet:
-  canny: lllyasviel/control_v11p_sd15_canny
-  inpaint: lllyasviel/control_v11p_sd15_inpaint
-  mlsd: lllyasviel/control_v11p_sd15_mlsd
-  depth: lllyasviel/control_v11f1p_sd15_depth
-  normal_bae: lllyasviel/control_v11p_sd15_normalbae
-  seg: lllyasviel/control_v11p_sd15_seg
-  lineart: lllyasviel/control_v11p_sd15_lineart
-  lineart_anime: lllyasviel/control_v11p_sd15s2_lineart_anime
-  scribble: lllyasviel/control_v11p_sd15_scribble
-  softedge: lllyasviel/control_v11p_sd15_softedge
-  shuffle: lllyasviel/control_v11e_sd15_shuffle
-  tile: lllyasviel/control_v11f1e_sd15_tile
-  ip2p: lllyasviel/control_v11e_sd15_ip2p
-textual_inversion:
-  'EasyNegative': https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors
-  'ahx-beta-453407d': sd-concepts-library/ahx-beta-453407d
-lora:
-  'LowRA': https://civitai.com/api/download/models/63006
-  'Ink scenery': https://civitai.com/api/download/models/83390
-  'sd-model-finetuned-lora-t4': sayakpaul/sd-model-finetuned-lora-t4
+sd-1/pipeline/stable-diffusion-v1-5:
+  description: Stable Diffusion version 1.5 diffusers model (4.27 GB)
+  repo_id: runwayml/stable-diffusion-v1-5
+  recommended: True
+sd-1/pipeline/stable-diffusion-inpainting:
+  description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)
+  repo_id: runwayml/stable-diffusion-inpainting
+  recommended: True
+sd-2/pipeline/stable-diffusion-2-1:
+  description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)
+  repo_id: stabilityai/stable-diffusion-2-1
+  recommended: True
+sd-2/pipeline/stable-diffusion-2-inpainting:
+  description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
+  repo_id: stabilityai/stable-diffusion-2-inpainting
+  recommended: False
+sd-1/pipeline/Analog-Diffusion:
+  description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)
+  repo_id: wavymulder/Analog-Diffusion
+  recommended: false
+sd-1/pipeline/Deliberate:
+  description: Versatile model that produces detailed images up to 768px (4.27 GB)
+  repo_id: XpucT/Deliberate
+  recommended: False
+sd-1/pipeline/Dungeons-and-Diffusion:
+  description: Dungeons & Dragons characters (2.13 GB)
+  repo_id: 0xJustin/Dungeons-and-Diffusion
+  recommended: False
+sd-1/pipeline/dreamlike-photoreal-2.0:
+  description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)
+  repo_id: dreamlike-art/dreamlike-photoreal-2.0
+  recommended: False
+sd-1/pipeline/Inkpunk-Diffusion:
+  description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)
+  repo_id: Envvi/Inkpunk-Diffusion
+  recommended: False
+sd-1/pipeline/openjourney:
+  description: An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)
+  repo_id: prompthero/openjourney
+  recommended: False
+sd-1/pipeline/portraitplus:
+  description: An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB)
+  repo_id: wavymulder/portraitplus
+  recommended: False
+sd-1/pipeline/seek.art_MEGA:
+  description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)
+  recommended: False
+sd-1/pipeline/trinart_stable_diffusion_v2:
+  description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)
+  repo_id: naclbit/trinart_stable_diffusion_v2
+  recommended: False
+sd-1/pipeline/waifu-diffusion:
+  description: An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB)
+  repo_id: hakurei/waifu-diffusion
+  recommended: False
+sd-1/controlnet/canny:
+  repo_id: lllyasviel/control_v11p_sd15_canny
+sd-1/controlnet/inpaint:
+  repo_id: lllyasviel/control_v11p_sd15_inpaint
+sd-1/controlnet/mlsd:
+  repo_id: lllyasviel/control_v11p_sd15_mlsd
+sd-1/controlnet/depth:
+  repo_id: lllyasviel/control_v11f1p_sd15_depth
+sd-1/controlnet/normal_bae:
+  repo_id: lllyasviel/control_v11p_sd15_normalbae
+sd-1/controlnet/seg:
+  repo_id: lllyasviel/control_v11p_sd15_seg
+sd-1/controlnet/lineart:
+  repo_id: lllyasviel/control_v11p_sd15_lineart
+sd-1/controlnet/lineart_anime:
+  repo_id: lllyasviel/control_v11p_sd15s2_lineart_anime
+sd-1/controlnet/scribble:
+  repo_id: lllyasviel/control_v11p_sd15_scribble
+sd-1/controlnet/softedge:
+  repo_id: lllyasviel/control_v11p_sd15_softedge
+sd-1/controlnet/shuffle:
+  repo_id: lllyasviel/control_v11e_sd15_shuffle
+sd-1/controlnet/tile:
+  repo_id: lllyasviel/control_v11f1e_sd15_tile
+sd-1/controlnet/ip2p:
+  repo_id: lllyasviel/control_v11e_sd15_ip2p
+sd-1/embedding/EasyNegative:
+  path: https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors
+sd-1/embedding/ahx-beta-453407d:
+  repo_id: sd-concepts-library/ahx-beta-453407d
+sd-1/lora/LowRA:
+  path: https://civitai.com/api/download/models/63006
+sd-1/lora/Ink Scenery:
+  path: https://civitai.com/api/download/models/83390
+sd-1/lora/sd-model-finetuned-lora-t4:
+  repo_id: sayakpaul/sd-model-finetuned-lora-t4
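The dataset file is now keyed by `base/type/name` rather than nested `diffusers:`/`controlnet:` sections, which is what lets `ModelInstall.all_models()` map entries straight onto `ModelLoadInfo` records. A small sketch of consuming the new format (the two abbreviated entries are samples, not the full shipped file):

    # Sketch: reading the re-keyed INITIAL_MODELS.yaml with OmegaConf.
    import textwrap
    from omegaconf import OmegaConf

    SAMPLE = """
    sd-1/pipeline/stable-diffusion-v1-5:
      description: Stable Diffusion version 1.5 diffusers model (4.27 GB)
      repo_id: runwayml/stable-diffusion-v1-5
      recommended: True
    sd-1/controlnet/canny:
      repo_id: lllyasviel/control_v11p_sd15_canny
    """

    datasets = OmegaConf.create(textwrap.dedent(SAMPLE))
    for key, value in datasets.items():
        base, model_type, name = key.split('/', 2)
        print(f"{name}: base={base} type={model_type} repo={value.get('repo_id')}")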

View File

@@ -31,17 +31,17 @@ from omegaconf import OmegaConf

 from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.backend.install.model_install_backend import (
-    Dataset_path,
+    Dataset_path, # most of these should go!!
     default_config_file,
     default_dataset,
     install_requested_models,
     recommended_datasets,
     ModelInstallList,
     UserSelections,
+    ModelInstall
 )
-from invokeai.backend import ModelManager
+from invokeai.backend.model_management import ModelManager, BaseModelType, ModelType
 from invokeai.backend.util import choose_precision, choose_torch_device
-from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.frontend.install.widgets import (
     CenteredTitleText,
     MultiSelectColumns,
@@ -58,6 +58,7 @@ from invokeai.frontend.install.widgets import (
 from invokeai.app.services.config import InvokeAIAppConfig

 config = InvokeAIAppConfig.get_config()
+logger = InvokeAILogger.getLogger()

 # build a table mapping all non-printable characters to None
 # for stripping control characters
@@ -90,25 +91,10 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         if not config.model_conf_path.exists():
             with open(config.model_conf_path,'w') as file:
                 print('# InvokeAI model configuration file',file=file)
-        model_manager = ModelManager(config.model_conf_path)
-
-        self.starter_models = OmegaConf.load(Dataset_path)['diffusers']
-        self.installed_diffusers_models = self.list_additional_diffusers_models(
-            model_manager,
-            self.starter_models,
-        )
-        self.installed_cn_models = model_manager.list_controlnet_models()
-        self.installed_lora_models = model_manager.list_lora_models()
-        self.installed_ti_models = model_manager.list_ti_models()
-
-        try:
-            self.existing_models = OmegaConf.load(default_config_file())
-        except:
-            self.existing_models = dict()
-
-        self.starter_model_list = list(self.starter_models.keys())
-        self.installed_models = dict()
+        self.installer = ModelInstall(config)
+        self.all_models = self.installer.all_models()
+        self.starter_models = self.installer.starter_models()
+        self.model_labels = self._get_model_labels()

         window_width, window_height = get_terminal_size()

         self.nextrely -= 1
@@ -141,39 +127,36 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
             scroll_exit = True,
         )
         self.tabs.on_changed = self._toggle_tables

         top_of_table = self.nextrely
-        self.starter_diffusers_models = self.add_starter_diffusers()
+        self.starter_pipelines = self.add_starter_pipelines()
        bottom_of_table = self.nextrely

         self.nextrely = top_of_table
-        self.diffusers_models = self.add_diffusers_widgets(
-            predefined_models=self.installed_diffusers_models,
-            model_type='Diffusers',
+        self.pipeline_models = self.add_model_widgets(
+            model_type=ModelType.Pipeline,
             window_width=window_width,
+            exclude = self.starter_models
         )
         bottom_of_table = max(bottom_of_table,self.nextrely)

         self.nextrely = top_of_table
         self.controlnet_models = self.add_model_widgets(
-            predefined_models=self.installed_cn_models,
-            model_type='ControlNet',
+            model_type=ModelType.ControlNet,
             window_width=window_width,
         )
         bottom_of_table = max(bottom_of_table,self.nextrely)

         self.nextrely = top_of_table
         self.lora_models = self.add_model_widgets(
-            predefined_models=self.installed_lora_models,
-            model_type="LoRA/LyCORIS",
+            model_type=ModelType.Lora,
             window_width=window_width,
         )
         bottom_of_table = max(bottom_of_table,self.nextrely)

         self.nextrely = top_of_table
         self.ti_models = self.add_model_widgets(
-            predefined_models=self.installed_ti_models,
-            model_type="Textual Inversion Embeddings",
+            model_type=ModelType.TextualInversion,
             window_width=window_width,
         )
         bottom_of_table = max(bottom_of_table,self.nextrely)
@@ -220,18 +203,20 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         self._toggle_tables([self.current_tab])

     ############# diffusers tab ##########
-    def add_starter_diffusers(self)->dict[str, npyscreen.widget]:
+    def add_starter_pipelines(self)->dict[str, npyscreen.widget]:
         '''Add widgets responsible for selecting diffusers models'''
         widgets = dict()
-        starter_model_labels = self._get_starter_model_labels()
-        recommended_models = [
+        models = self.all_models
+        starters = self.starter_models
+        starter_model_labels = self.model_labels
+        recommended_models = set([
             x
-            for x in self.starter_model_list
-            if self.starter_models[x].get("recommended", False)
-        ]
+            for x in starters
+            if models[x].recommended
+        ])
         self.installed_models = sorted(
-            [x for x in list(self.starter_models.keys()) if x in self.existing_models]
+            [x for x in starters if models[x].installed]
         )

         widgets.update(
@@ -246,55 +231,46 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         self.nextrely -= 1

         # if user has already installed some initial models, then don't patronize them
         # by showing more recommendations
-        show_recommended = not self.existing_models
+        show_recommended = len(self.installed_models)==0
+        keys = [x for x in models.keys() if x in starters]
         widgets.update(
             models_selected = self.add_widget_intelligent(
                 MultiSelectColumns,
                 columns=1,
                 name="Install Starter Models",
-                values=starter_model_labels,
+                values=[starter_model_labels[x] for x in keys],
                 value=[
-                    self.starter_model_list.index(x)
-                    for x in self.starter_model_list
-                    if (show_recommended and x in recommended_models)\
-                       or (x in self.existing_models)
+                    keys.index(x)
+                    for x in keys
+                    if (show_recommended and models[x].recommended) \
+                       or (x in self.installed_models)
                 ],
-                max_height=len(starter_model_labels) + 1,
+                max_height=len(starters) + 1,
                 relx=4,
                 scroll_exit=True,
-            )
+            ),
+            models = keys,
         )
-        widgets.update(
-            purge_deleted = self.add_widget_intelligent(
-                npyscreen.Checkbox,
-                name="Purge unchecked diffusers models from disk",
-                value=False,
-                scroll_exit=True,
-                relx=4,
-            )
-        )
-        widgets['purge_deleted'].when_value_edited = lambda: self.sync_purge_buttons(widgets['purge_deleted'])

         self.nextrely += 1
         return widgets
     ############# Add a set of model install widgets ########
     def add_model_widgets(self,
-                          predefined_models: dict[str,bool],
-                          model_type: str,
+                          model_type: ModelType,
                           window_width: int=120,
                           install_prompt: str=None,
-                          add_purge_deleted: bool=False,
+                          exclude: set=set(),
                           )->dict[str,npyscreen.widget]:
         '''Generic code to create model selection widgets'''
         widgets = dict()
-        model_list = sorted(predefined_models.keys())
+        model_list = [x for x in self.all_models if self.all_models[x].model_type==model_type and not x in exclude]
+        model_labels = [self.model_labels[x] for x in model_list]
         if len(model_list) > 0:
-            max_width = max([len(x) for x in model_list])
+            max_width = max([len(x) for x in model_labels])
             columns = window_width // (max_width+8)  # 8 characters for "[x] " and padding
             columns = min(len(model_list),columns) or 1
-            prompt = install_prompt or f"Select the desired {model_type} models to install. Unchecked models will be purged from disk."
+            prompt = install_prompt or f"Select the desired {model_type.value.title()} models to install. Unchecked models will be purged from disk."

             widgets.update(
                 label1 = self.add_widget_intelligent(
@@ -310,31 +286,19 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
                     MultiSelectColumns,
                     columns=columns,
                     name=f"Install {model_type} Models",
-                    values=model_list,
+                    values=model_labels,
                     value=[
                         model_list.index(x)
                         for x in model_list
-                        if predefined_models[x]
+                        if self.all_models[x].installed
                     ],
                     max_height=len(model_list)//columns + 1,
                     relx=4,
                     scroll_exit=True,
-                )
+                ),
+                models = model_list,
             )
-
-        if add_purge_deleted:
-            self.nextrely += 1
-            widgets.update(
-                purge_deleted = self.add_widget_intelligent(
-                    npyscreen.Checkbox,
-                    name="Purge unchecked diffusers models from disk",
-                    value=False,
-                    scroll_exit=True,
-                    relx=4,
-                )
-            )
-            widgets['purge_deleted'].when_value_edited = lambda: self.sync_purge_buttons(widgets['purge_deleted'])

         self.nextrely += 1
         widgets.update(
             download_ids = self.add_widget_intelligent(
@@ -349,18 +313,15 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):

     ### Tab for arbitrary diffusers widgets ###
     def add_diffusers_widgets(self,
-                              predefined_models: dict[str,bool],
-                              model_type: str='Diffusers',
+                              model_type: ModelType=ModelType.Pipeline,
                               window_width: int=120,
                               )->dict[str,npyscreen.widget]:
         '''Similar to add_model_widgets() but adds some additional widgets at the bottom
         to support the autoload directory'''
         widgets = self.add_model_widgets(
-            predefined_models,
-            'Diffusers',
-            window_width,
-            install_prompt="Additional diffusers models already installed.",
-            add_purge_deleted=True
+            model_type = model_type,
+            window_width = window_width,
+            install_prompt=f"Additional {model_type.value.title()} models already installed.",
         )

         label = "Directory to scan for models to automatically import (<tab> autocompletes):"
@@ -390,21 +351,17 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         )
         return widgets

-    def sync_purge_buttons(self,checkbox):
-        value = checkbox.value
-        self.starter_diffusers_models['purge_deleted'].value = value
-        self.diffusers_models['purge_deleted'].value = value
-
     def resize(self):
         super().resize()
-        if (s := self.starter_diffusers_models.get("models_selected")):
-            s.values = self._get_starter_model_labels()
+        if (s := self.starter_pipelines.get("models_selected")):
+            keys = [x for x in self.all_models.keys() if x in self.starter_models]
+            s.values = [self.model_labels[x] for x in keys]

     def _toggle_tables(self, value=None):
         selected_tab = value[0]
         widgets = [
-            self.starter_diffusers_models,
-            self.diffusers_models,
+            self.starter_pipelines,
+            self.pipeline_models,
             self.controlnet_models,
             self.lora_models,
             self.ti_models,
@@ -412,34 +369,38 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         for group in widgets:
             for k,v in group.items():
-                v.hidden = True
-                v.editable = False
+                try:
+                    v.hidden = True
+                    v.editable = False
+                except:
+                    pass
         for k,v in widgets[selected_tab].items():
-            v.hidden = False
-            if not isinstance(v,(npyscreen.FixedText, npyscreen.TitleFixedText, CenteredTitleText)):
-                v.editable = True
+            try:
+                v.hidden = False
+                if not isinstance(v,(npyscreen.FixedText, npyscreen.TitleFixedText, CenteredTitleText)):
+                    v.editable = True
+            except:
+                pass
         self.__class__.current_tab = selected_tab  # for persistence
         self.display()

-    def _get_starter_model_labels(self) -> List[str]:
+    def _get_model_labels(self) -> dict[str,str]:
         window_width, window_height = get_terminal_size()
-        label_width = 25
         checkbox_width = 4
         spacing_width = 2
+        models = self.all_models
+        label_width = max([len(models[x].name) for x in models])
         description_width = window_width - label_width - checkbox_width - spacing_width
-        im = self.starter_models
-        names = self.starter_model_list
-        descriptions = [
-            im[x].description[0 : description_width - 3] + "..."
-            if len(im[x].description) > description_width
-            else im[x].description
-            for x in names
-        ]
-        return [
-            f"%-{label_width}s %s" % (names[x], descriptions[x])
-            for x in range(0, len(names))
-        ]
+        result = dict()
+        for x in models.keys():
+            description = models[x].description
+            description = description[0 : description_width - 3] + "..." \
+                if description and len(description) > description_width \
+                else description if description else ''
+            result[x] = f"%-{label_width}s %s" % (models[x].name, description)
+        return result

     def _get_columns(self) -> int:
         window_width, window_height = get_terminal_size()
@@ -548,8 +509,8 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         # rebuild the form, saving and restoring some of the fields that need to be preserved.
         saved_messages = self.monitor.entry_widget.values
-        autoload_dir = self.diffusers_models['autoload_directory'].value
-        autoscan = self.diffusers_models['autoscan_on_startup'].value
+        autoload_dir = self.pipeline_models['autoload_directory'].value
+        autoscan = self.pipeline_models['autoscan_on_startup'].value

         app.main_form = app.addForm(
             "MAIN", addModelsForm, name="Install Stable Diffusion Models", multipage=self.multipage,
@@ -558,23 +519,8 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         app.main_form.monitor.entry_widget.values = saved_messages
         app.main_form.monitor.entry_widget.buffer([''],scroll_end=True)
-        app.main_form.diffusers_models['autoload_directory'].value = autoload_dir
-        app.main_form.diffusers_models['autoscan_on_startup'].value = autoscan
-
-    ###############################################################
-    def list_additional_diffusers_models(self,
-                                         manager: ModelManager,
-                                         starters:dict
-                                         )->dict[str,bool]:
-        '''Return a dict of all the currently installed models that are not on the starter list'''
-        model_info = manager.list_models()
-        additional_models = {
-            x:True for x in model_info \
-            if model_info[x]['format']=='diffusers' \
-            and x not in starters
-        }
-        return additional_models
+        app.main_form.pipeline_models['autoload_directory'].value = autoload_dir
+        app.main_form.pipeline_models['autoscan_on_startup'].value = autoscan

     def marshall_arguments(self):
         """
@@ -591,24 +537,20 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         selections = self.parentApp.user_selections

         # Starter models to install/remove
-        starter_models = dict(
-            map(
-                lambda x: (self.starter_model_list[x], True),
-                self.starter_diffusers_models['models_selected'].value,
-            )
-        )
-        selections.purge_deleted_models = self.starter_diffusers_models['purge_deleted'].value or \
-            self.diffusers_models['purge_deleted'].value
-        selections.install_models = [x for x in starter_models if x not in self.existing_models]
-        selections.remove_models = [x for x in self.starter_model_list if x in self.existing_models and x not in starter_models]
+        # TO DO - turn these into a dict so we don't have to hard-code the attributes
+        print(f'installed={[x for x in self.all_models if self.all_models[x].installed]}',file=f)
+        for section in [self.starter_pipelines, self.pipeline_models,
+                        self.controlnet_models, self.lora_models, self.ti_models]:
+            selected = set([section['models'][x] for x in section['models_selected'].value])
+            models_to_install = [x for x in selected if not self.all_models[x].installed]
+            models_to_remove = [x for x in section['models'] if x not in selected and self.all_models[x].installed]

         # "More" models
-        selections.import_model_paths = self.diffusers_models['download_ids'].value.split()
-        if diffusers_selected := self.diffusers_models.get('models_selected'):
+        selections.import_model_paths = self.pipeline_models['download_ids'].value.split()
+        if diffusers_selected := self.pipeline_models.get('models_selected'):
             selections.remove_models.extend([x
                                              for x in diffusers_selected.values
-                                             if self.installed_diffusers_models[x]
+                                             if self.installed_pipeline_models[x]
                                              and diffusers_selected.values.index(x) not in diffusers_selected.value
                                              ]
                                             )
@@ -659,9 +601,8 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
             selections.install_ti_models.extend(additional_tis)

         # load directory and whether to scan on startup
-        selections.scan_directory = self.diffusers_models['autoload_directory'].value
-        selections.autoscan_on_startup = self.diffusers_models['autoscan_on_startup'].value
+        selections.scan_directory = self.pipeline_models['autoload_directory'].value
+        selections.autoscan_on_startup = self.pipeline_models['autoscan_on_startup'].value

 class AddModelApplication(npyscreen.NPSAppManaged):
     def __init__(self,opt):
@ -761,19 +702,19 @@ def process_and_execute(opt: Namespace,
     directory_to_scan = selections.scan_directory
     scan_at_startup = selections.autoscan_on_startup
     potential_models_to_install = selections.import_model_paths
+    name_map = selections.model_name_map
     install_requested_models(
-        diffusers = ModelInstallList(models_to_install, models_to_remove),
-        controlnet = ModelInstallList(selections.install_cn_models, selections.remove_cn_models),
-        lora = ModelInstallList(selections.install_lora_models, selections.remove_lora_models),
-        ti = ModelInstallList(selections.install_ti_models, selections.remove_ti_models),
+        diffusers = ModelInstallList(models_to_install, [name_map[ModelType.Pipeline][x] for x in models_to_remove]),
+        controlnet = ModelInstallList(selections.install_cn_models, [name_map[ModelType.ControlNet][x] for x in selections.remove_cn_models]),
+        lora = ModelInstallList(selections.install_lora_models, [name_map[ModelType.Lora][x] for x in selections.remove_lora_models]),
+        ti = ModelInstallList(selections.install_ti_models, [name_map[ModelType.TextualInversion][x] for x in selections.remove_ti_models]),
         scan_directory=Path(directory_to_scan) if directory_to_scan else None,
         external_models=potential_models_to_install,
         scan_at_startup=scan_at_startup,
         precision="float32"
         if opt.full_precision
         else choose_precision(torch.device(choose_torch_device())),
-        purge_deleted=selections.purge_deleted_models,
         config_file_path=Path(opt.config_file) if opt.config_file else config.model_conf_path,
         model_config_file_callback = lambda x: ask_user_for_config_file(x,conn_out)
     )
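The TUI now shows friendly display names, so `selections.model_name_map` (assumed from its use here to be a per-ModelType dict of display name to unique registry key) translates each removal back into the key the model manager stores. A hypothetical illustration:

    from invokeai.backend.model_management.model_probe import ModelType

    # assumed shape of selections.model_name_map (values are invented)
    name_map = {ModelType.Lora: {'lowra': 'sd-1/lora/lowra'}}
    remove_lora_models = ['lowra']
    print([name_map[ModelType.Lora][x] for x in remove_lora_models])
    # -> ['sd-1/lora/lowra']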
@@ -828,7 +769,6 @@ def select_and_download_models(opt: Namespace):
             ti=ModelInstallList(**{action:opt.textual_inversions or []}),
             lora=ModelInstallList(**{action:opt.loras or []}),
             precision=precision,
-            purge_deleted=True,
             model_config_file_callback=lambda x: ask_user_for_config_file(x),
         )
     elif opt.default_only:

View File

@@ -24,14 +24,32 @@ from transformers import (
 )
 import invokeai.backend.util.logging as logger
+from invokeai.backend.model_management import ModelManager
 from invokeai.backend.model_management.model_probe import (
-    ModelProbe, ModelType, BaseModelType
+    ModelProbe, ModelType, BaseModelType, SchedulerPredictionType, ModelVariantInfo
 )

 warnings.filterwarnings("ignore")
 transformers.logging.set_verbosity_error()
 diffusers.logging.set_verbosity_error()

+model_names = set()
+
+def unique_name(name,info)->str:
+    done = False
+    key = ModelManager.create_key(name,info.base_type,info.model_type)
+    unique_name = key
+    counter = 1
+    while not done:
+        if unique_name in model_names:
+            unique_name = f'{key}-{counter:0>2d}'
+            counter += 1
+        else:
+            done = True
+    model_names.add(unique_name)
+    name,_,_ = ModelManager.parse_key(unique_name)
+    return name
 def create_directory_structure(dest: Path):
     for model_base in [BaseModelType.StableDiffusion1,BaseModelType.StableDiffusion2]:
         for model_type in [ModelType.Pipeline, ModelType.Vae, ModelType.Lora,
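The new `unique_name()` helper keeps a module-level set of every key it has handed out and appends a two-digit counter on collision. Assuming `ModelManager.create_key()` builds a `base/type/name` string (inferred from how `parse_key()` is used above), repeated names resolve like this:

    # hypothetical: three probes that share name, base_type and model_type
    # unique_name('my-model', info)   -> 'my-model'
    # unique_name('my-model', info)   -> 'my-model-01'
    # unique_name('my-model', info)   -> 'my-model-02'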
@@ -113,10 +131,10 @@ def migrate_conversion_models(dest_directory: Path):
     # sd-1
     repo_id = 'openai/clip-vit-large-patch14'
     pipeline = CLIPTokenizer.from_pretrained(repo_id, **kwargs)
-    pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14', safe_serialization=True)
+    pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14' / 'tokenizer', safe_serialization=True)

     pipeline = CLIPTextModel.from_pretrained(repo_id, **kwargs)
-    pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14', safe_serialization=True)
+    pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14' / 'text_encoder', safe_serialization=True)

     # sd-2
     repo_id = "stabilityai/stable-diffusion-2"
@@ -153,12 +171,48 @@ def migrate_tuning_models(dest: Path):
         logger.info(f'Scanning {subdir}')
         migrate_models(src, dest)

+def write_yaml(model_name: str, path:Path, info:ModelVariantInfo, dest_yaml: io.TextIOBase):
+    name = unique_name(model_name, info)
+    stanza = {
+        f'{info.base_type.value}/{info.model_type.value}/{name}': {
+            'name': model_name,
+            'path': str(path),
+            'description': f'diffusers model {model_name}',
+            'format': 'diffusers',
+            'image_size': info.image_size,
+            'base': info.base_type.value,
+            'variant': info.variant_type.value,
+            'prediction_type': info.prediction_type.value,
+            'upcast_attention': info.prediction_type == SchedulerPredictionType.VPrediction
+        }
+    }
+    dest_yaml.write(yaml.dump(stanza))
+    dest_yaml.flush()
+
+def migrate_converted(dest_dir: Path, dest_yaml: io.TextIOBase):
+    for sub_dir in [Path('./models/converted_ckpts'),Path('./models/optimize-ckpts')]:
+        for model in sub_dir.glob('*'):
+            if not model.is_dir():
+                continue
+            info = ModelProbe().heuristic_probe(model)
+            if not info:
+                continue
+            dest = Path(dest_dir, info.base_type.value, info.model_type.value, model.name)
+            try:
+                copy_dir(model,dest)
+                rel_path = Path('models',dest.relative_to(dest_dir))
+                write_yaml(model.name,path=rel_path,info=info, dest_yaml=dest_yaml)
+            except KeyboardInterrupt:
+                raise
+            except Exception as e:
+                logger.warning(f'Could not migrate the converted diffusers {model.name}: {str(e)}. Skipping.')
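For reference, one stanza emitted by `write_yaml()` would look roughly like this; keys come out alphabetized because `yaml.dump()` defaults to `sort_keys=True`, and the enum values shown are illustrative:

    sd-1/pipeline/my-model:
      base: sd-1
      description: diffusers model my-model
      format: diffusers
      image_size: 512
      name: my-model
      path: models/sd-1/pipeline/my-model
      prediction_type: epsilon
      upcast_attention: false
      variant: normal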
 def migrate_pipelines(dest_dir: Path, dest_yaml: io.TextIOBase):
     cache = Path('./models/hub')
     kwargs = dict(
         cache_dir = cache,
-        local_files_only = True,
         safety_checker = None,
+        # local_files_only = True,
     )
     for model in cache.glob('models--*'):
         if len(list(model.glob('snapshots/**/model_index.json')))==0:
@@ -166,38 +220,26 @@ def migrate_pipelines(dest_dir: Path, dest_yaml: io.TextIOBase):
         _,owner,repo_name=model.name.split('--')
         repo_id = f'{owner}/{repo_name}'
         revisions = [x.name for x in model.glob('refs/*')]
-        for revision in revisions:
-            logger.info(f'Migrating {repo_id}, revision {revision}')
-            try:
-                pipeline = StableDiffusionPipeline.from_pretrained(
-                    repo_id,
-                    revision=revision,
-                    **kwargs)
-                info = ModelProbe().heuristic_probe(pipeline)
-                if not info:
-                    continue
-                dest = Path(dest_dir, info.base_type.value, info.model_type.value, f'{repo_name}-{revision}')
-                pipeline.save_pretrained(dest, safe_serialization=True)
-                rel_path = Path('models',dest.relative_to(dest_dir))
-                stanza = {
-                    f'{info.base_type.value}/{info.model_type.value}/{repo_name}-{revision}':
-                    {
-                        'name': repo_name,
-                        'path': str(rel_path),
-                        'description': f'diffusers model {repo_id}',
-                        'format': 'diffusers',
-                        'image_size': info.image_size,
-                        'base': info.base_type.value,
-                        'variant': info.variant_type.value,
-                        'prediction_type': info.prediction_type.value,
-                    }
-                }
-                print(yaml.dump(stanza),file=dest_yaml,end="")
-                dest_yaml.flush()
-            except KeyboardInterrupt:
-                raise
-            except Exception as e:
-                logger.warning(f'Could not load the "{revision}" version of {repo_id}. Skipping.')
+        # if an fp16 is available we use that
+        revision = 'fp16' if len(revisions) > 1 and 'fp16' in revisions else revisions[0]
+        logger.info(f'Migrating {repo_id}, revision {revision}')
+        try:
+            pipeline = StableDiffusionPipeline.from_pretrained(
+                repo_id,
+                revision=revision,
+                **kwargs)
+            info = ModelProbe().heuristic_probe(pipeline)
+            if not info:
+                continue
+            dest = Path(dest_dir, info.base_type.value, info.model_type.value, f'{repo_name}')
+            pipeline.save_pretrained(dest, safe_serialization=True)
+            rel_path = Path('models',dest.relative_to(dest_dir))
+            write_yaml(repo_name, path=rel_path, info=info, dest_yaml=dest_yaml)
+        except KeyboardInterrupt:
+            raise
+        except Exception as e:
+            logger.warning(f'Could not load the "{revision}" version of {repo_id}. Skipping.')
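The rewritten loop migrates one revision per repo instead of every cached revision, preferring `fp16` when more than one ref exists, and the destination folder no longer embeds the revision name, so each repo lands exactly once. The selection logic in isolation:

    revisions = ['main', 'fp16']
    revision = 'fp16' if len(revisions) > 1 and 'fp16' in revisions else revisions[0]
    print(revision)   # -> 'fp16'; with revisions == ['main'] it falls back to 'main'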
 def migrate_checkpoints(dest_dir: Path, dest_yaml: io.TextIOBase):
     # find any checkpoints referred to in old models.yaml
@@ -218,6 +260,7 @@ def migrate_checkpoints(dest_dir: Path, dest_yaml: io.TextIOBase):
             dest = Path(dest_dir, info.base_type.value, info.model_type.value,weights.name)
             copy_file(weights,dest)
             weights = Path('models', info.base_type.value, info.model_type.value,weights.name)
+            model_name = unique_name(model_name, info)
             stanza = {
                 f'{info.base_type.value}/{info.model_type.value}/{model_name}':
                 {
@@ -261,15 +304,16 @@ def main():
     os.chdir(root_directory)
     with open(dest_yaml,'w') as yaml_file:
-        print(yaml.dump({'__metadata__':
+        yaml_file.write(yaml.dump({'__metadata__':
                          {'version':'3.0.0'}
                          }
-                        ),file=yaml_file,end=""
+                        )
               )
         create_directory_structure(dest_directory)
         migrate_support_models(dest_directory)
         migrate_conversion_models(dest_directory)
         migrate_tuning_models(dest_directory)
+        migrate_converted(dest_directory,yaml_file)
         migrate_pipelines(dest_directory,yaml_file)
         migrate_checkpoints(dest_directory,yaml_file)
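Note that `migrate_converted()` runs before `migrate_pipelines()`, so previously converted checkpoints register their names first and any later duplicate picks up a `-01`-style suffix from `unique_name()`. The header written at the top of the with-block yields a models.yaml that begins:

    __metadata__:
      version: 3.0.0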