From ada7399753c20fd5b35c233c18235704d8b97c91 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 15 Jun 2023 23:32:33 -0400 Subject: [PATCH 01/47] rewrite of widget display - marshalling needs rewrite --- .../backend/install/invokeai_configure.py | 199 +++++++------- .../backend/install/model_install_backend.py | 137 ++++++--- .../backend/model_management/model_manager.py | 17 +- .../backend/model_management/models/lora.py | 1 + invokeai/configs/INITIAL_MODELS.yaml | 197 ++++++------- invokeai/frontend/install/model_install.py | 260 +++++++----------- scripts/migrate_models_to_3.0.py | 126 ++++++--- 7 files changed, 473 insertions(+), 464 deletions(-) diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py index 603760c0c1..cdb3f47755 100755 --- a/invokeai/backend/install/invokeai_configure.py +++ b/invokeai/backend/install/invokeai_configure.py @@ -16,6 +16,7 @@ import shutil import textwrap import traceback import warnings +import yaml from argparse import Namespace from pathlib import Path from shutil import get_terminal_size @@ -25,6 +26,7 @@ from urllib import request import npyscreen import transformers from diffusers import AutoencoderKL +from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from huggingface_hub import HfFolder from huggingface_hub import login as hf_hub_login from omegaconf import OmegaConf @@ -34,6 +36,8 @@ from transformers import ( CLIPSegForImageSegmentation, CLIPTextModel, CLIPTokenizer, + AutoFeatureExtractor, + BertTokenizerFast, ) import invokeai.configs as configs @@ -58,6 +62,9 @@ from invokeai.backend.install.model_install_backend import ( recommended_datasets, UserSelections, ) +from invokeai.backend.model_management.model_probe import ( + ModelProbe, ModelType, BaseModelType, SchedulerPredictionType + ) warnings.filterwarnings("ignore") transformers.logging.set_verbosity_error() @@ -81,7 +88,7 @@ INIT_FILE_PREAMBLE = """# InvokeAI initialization file # or renaming it and then running invokeai-configure again. 
""" -logger=None +logger=InvokeAILogger.getLogger() # -------------------------------------------- def postscript(errors: None): @@ -162,75 +169,91 @@ class ProgressBar: # --------------------------------------------- def download_with_progress_bar(model_url: str, model_dest: str, label: str = "the"): try: - print(f"Installing {label} model file {model_url}...", end="", file=sys.stderr) + logger.info(f"Installing {label} model file {model_url}...") if not os.path.exists(model_dest): os.makedirs(os.path.dirname(model_dest), exist_ok=True) request.urlretrieve( model_url, model_dest, ProgressBar(os.path.basename(model_dest)) ) - print("...downloaded successfully", file=sys.stderr) + logger.info("...downloaded successfully") else: - print("...exists", file=sys.stderr) + logger.info("...exists") except Exception: - print("...download failed", file=sys.stderr) - print(f"Error downloading {label} model", file=sys.stderr) + logger.info("...download failed") + logger.info(f"Error downloading {label} model") print(traceback.format_exc(), file=sys.stderr) -# --------------------------------------------- -# this will preload the Bert tokenizer fles -def download_bert(): - print("Installing bert tokenizer...", file=sys.stderr) - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) - from transformers import BertTokenizerFast +def download_conversion_models(): + target_dir = config.root_path / 'models/core/convert' + kwargs = dict() # for future use + try: + logger.info('Downloading core tokenizers and text encoders') - download_from_hf(BertTokenizerFast, "bert-base-uncased") + # bert + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=DeprecationWarning) + bert = BertTokenizerFast.from_pretrained("bert-base-uncased", **kwargs) + bert.save_pretrained(target_dir / 'bert-base-uncased', safe_serialization=True) + + # sd-1 + repo_id = 'openai/clip-vit-large-patch14' + download_from_hf(CLIPTokenizer, repo_id, target_dir / 'clip-vit-large-patch14') + download_from_hf(CLIPTextModel, repo_id, target_dir / 'clip-vit-large-patch14') + # sd-2 + repo_id = "stabilityai/stable-diffusion-2" + pipeline = CLIPTokenizer.from_pretrained(repo_id, subfolder="tokenizer", **kwargs) + pipeline.save_pretrained(target_dir / 'stable-diffusion-2-clip' / 'tokenizer', safe_serialization=True) -# --------------------------------------------- -def download_sd1_clip(): - print("Installing SD1 clip model...", file=sys.stderr) - version = "openai/clip-vit-large-patch14" - download_from_hf(CLIPTokenizer, version) - download_from_hf(CLIPTextModel, version) + pipeline = CLIPTextModel.from_pretrained(repo_id, subfolder="text_encoder", **kwargs) + pipeline.save_pretrained(target_dir / 'stable-diffusion-2-clip' / 'text_encoder', safe_serialization=True) + # VAE + logger.info('Downloading stable diffusion VAE') + vae = AutoencoderKL.from_pretrained('stabilityai/sd-vae-ft-mse', **kwargs) + vae.save_pretrained(target_dir / 'sd-vae-ft-mse', safe_serialization=True) -# --------------------------------------------- -def download_sd2_clip(): - version = "stabilityai/stable-diffusion-2" - print("Installing SD2 clip model...", file=sys.stderr) - download_from_hf(CLIPTokenizer, version, subfolder="tokenizer") - download_from_hf(CLIPTextModel, version, subfolder="text_encoder") + # safety checking + logger.info('Downloading safety checker') + repo_id = "CompVis/stable-diffusion-safety-checker" + pipeline = AutoFeatureExtractor.from_pretrained(repo_id,**kwargs) + 
pipeline.save_pretrained(target_dir / 'stable-diffusion-safety-checker', safe_serialization=True) + pipeline = StableDiffusionSafetyChecker.from_pretrained(repo_id,**kwargs) + pipeline.save_pretrained(target_dir / 'stable-diffusion-safety-checker', safe_serialization=True) + except KeyboardInterrupt: + raise + except Exception as e: + logger.error(str(e)) # --------------------------------------------- def download_realesrgan(): - print("Installing models from RealESRGAN...", file=sys.stderr) + logger.info("Installing models from RealESRGAN...") model_url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth" wdn_model_url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth" - model_dest = config.root_path / "models/realesrgan/realesr-general-x4v3.pth" - wdn_model_dest = config.root_path / "models/realesrgan/realesr-general-wdn-x4v3.pth" + model_dest = config.root_path / "models/core/upscaling/realesrgan/realesr-general-x4v3.pth" + wdn_model_dest = config.root_path / "models/core/upscaling/realesrgan/realesr-general-wdn-x4v3.pth" download_with_progress_bar(model_url, str(model_dest), "RealESRGAN") download_with_progress_bar(wdn_model_url, str(wdn_model_dest), "RealESRGANwdn") def download_gfpgan(): - print("Installing GFPGAN models...", file=sys.stderr) + logger.info("Installing GFPGAN models...") for model in ( [ "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth", - "./models/gfpgan/GFPGANv1.4.pth", + "./models/core/face_restoration/gfpgan/GFPGANv1.4.pth", ], [ "https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth", - "./models/gfpgan/weights/detection_Resnet50_Final.pth", + "./models/core/face_restoration/gfpgan/weights/detection_Resnet50_Final.pth", ], [ "https://github.com/xinntao/facexlib/releases/download/v0.2.2/parsing_parsenet.pth", - "./models/gfpgan/weights/parsing_parsenet.pth", + "./models/core/face_restoration/gfpgan/weights/parsing_parsenet.pth", ], ): model_url, model_dest = model[0], config.root_path / model[1] @@ -239,70 +262,32 @@ def download_gfpgan(): # --------------------------------------------- def download_codeformer(): - print("Installing CodeFormer model file...", file=sys.stderr) + logger.info("Installing CodeFormer model file...") model_url = ( "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth" ) - model_dest = config.root_path / "models/codeformer/codeformer.pth" + model_dest = config.root_path / "models/core/face_restoration/codeformer/codeformer.pth" download_with_progress_bar(model_url, str(model_dest), "CodeFormer") # --------------------------------------------- def download_clipseg(): - print("Installing clipseg model for text-based masking...", file=sys.stderr) + logger.info("Installing clipseg model for text-based masking...") CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined" try: - download_from_hf(AutoProcessor, CLIPSEG_MODEL) - download_from_hf(CLIPSegForImageSegmentation, CLIPSEG_MODEL) + download_from_hf(AutoProcessor, CLIPSEG_MODEL, config.root_path / 'models/core/misc/clipseg') + download_from_hf(CLIPSegForImageSegmentation, CLIPSEG_MODEL,'models/core/misc/clipseg') except Exception: - print("Error installing clipseg model:") - print(traceback.format_exc()) + logger.info("Error installing clipseg model:") + logger.info(traceback.format_exc()) -# ------------------------------------- -def download_safety_checker(): - print("Installing model for NSFW content detection...", 
file=sys.stderr) - try: - from diffusers.pipelines.stable_diffusion.safety_checker import ( - StableDiffusionSafetyChecker, - ) - from transformers import AutoFeatureExtractor - except ModuleNotFoundError: - print("Error installing NSFW checker model:") - print(traceback.format_exc()) - return - safety_model_id = "CompVis/stable-diffusion-safety-checker" - print("AutoFeatureExtractor...", file=sys.stderr) - download_from_hf(AutoFeatureExtractor, safety_model_id) - print("StableDiffusionSafetyChecker...", file=sys.stderr) - download_from_hf(StableDiffusionSafetyChecker, safety_model_id) - - -# ------------------------------------- -def download_vaes(): - print("Installing stabilityai VAE...", file=sys.stderr) - try: - # first the diffusers version - repo_id = "stabilityai/sd-vae-ft-mse" - args = dict( - cache_dir=config.cache_dir, - ) - if not AutoencoderKL.from_pretrained(repo_id, **args): - raise Exception(f"download of {repo_id} failed") - - repo_id = "stabilityai/sd-vae-ft-mse-original" - model_name = "vae-ft-mse-840000-ema-pruned.ckpt" - # next the legacy checkpoint version - if not hf_download_with_resume( - repo_id=repo_id, - model_name=model_name, - model_dir=str(config.root_path / Model_dir / Weights_dir), - ): - raise Exception(f"download of {model_name} failed") - except Exception as e: - print(f"Error downloading StabilityAI standard VAE: {str(e)}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - +def download_support_models(): + download_realesrgan() + download_gfpgan() + download_codeformer() + download_clipseg() + download_conversion_models() # ------------------------------------- def get_root(root: str = None) -> str: @@ -657,17 +642,13 @@ def default_user_selections(program_opts: Namespace) -> UserSelections: # ------------------------------------- def initialize_rootdir(root: Path, yes_to_all: bool = False): - print("** INITIALIZING INVOKEAI RUNTIME DIRECTORY **") - + logger.info("** INITIALIZING INVOKEAI RUNTIME DIRECTORY **") for name in ( "models", - "configs", - "embeddings", "databases", - "loras", - "controlnets", "text-inversion-output", "text-inversion-training-data", + "configs" ): os.makedirs(os.path.join(root, name), exist_ok=True) @@ -676,6 +657,22 @@ def initialize_rootdir(root: Path, yes_to_all: bool = False): if not os.path.samefile(configs_src, configs_dest): shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True) + dest = root / 'models' + for model_base in [BaseModelType.StableDiffusion1,BaseModelType.StableDiffusion2]: + for model_type in [ModelType.Pipeline, ModelType.Vae, ModelType.Lora, + ModelType.ControlNet,ModelType.TextualInversion]: + path = dest / model_base.value / model_type.value + path.mkdir(parents=True, exist_ok=True) + path = dest / 'core' + path.mkdir(parents=True, exist_ok=True) + + with open(root / 'configs' / 'models.yaml','w') as yaml_file: + yaml_file.write(yaml.dump({'__metadata__': + {'version':'3.0.0'} + } + ) + ) + # ------------------------------------- def run_console_ui( @@ -837,7 +834,7 @@ def main(): old_init_file = config.root_path / 'invokeai.init' new_init_file = config.root_path / 'invokeai.yaml' if old_init_file.exists() and not new_init_file.exists(): - print('** Migrating invokeai.init to invokeai.yaml') + logger.info('** Migrating invokeai.init to invokeai.yaml') migrate_init_file(old_init_file) # Load new init file into config config.parse_args(argv=[],conf=OmegaConf.load(new_init_file)) @@ -855,29 +852,21 @@ def main(): if init_options: write_opts(init_options, new_init_file) else: - 
print( + logger.info( '\n** CANCELLED AT USER\'S REQUEST. USE THE "invoke.sh" LAUNCHER TO RUN LATER **\n' ) sys.exit(0) if opt.skip_support_models: - print("\n** SKIPPING SUPPORT MODEL DOWNLOADS PER USER REQUEST **") + logger.info("SKIPPING SUPPORT MODEL DOWNLOADS PER USER REQUEST") else: - print("\n** CHECKING/UPDATING SUPPORT MODELS **") - download_bert() - download_sd1_clip() - download_sd2_clip() - download_realesrgan() - download_gfpgan() - download_codeformer() - download_clipseg() - download_safety_checker() - download_vaes() + logger.info("CHECKING/UPDATING SUPPORT MODELS") + download_support_models() if opt.skip_sd_weights: - print("\n** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **") + logger.info("\n** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **") elif models_to_download: - print("\n** DOWNLOADING DIFFUSION WEIGHTS **") + logger.info("\n** DOWNLOADING DIFFUSION WEIGHTS **") process_and_execute(opt, models_to_download) postscript(errors=errors) diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index 18964c774e..60f2d89748 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -9,7 +9,7 @@ import warnings from dataclasses import dataclass,field from pathlib import Path from tempfile import TemporaryFile -from typing import List, Dict, Callable +from typing import List, Dict, Set, Callable import requests from diffusers import AutoencoderKL @@ -20,8 +20,8 @@ from tqdm import tqdm import invokeai.configs as configs - from invokeai.app.services.config import InvokeAIAppConfig +from invokeai.backend.model_management import ModelManager, ModelType, BaseModelType from ..stable_diffusion import StableDiffusionGeneratorPipeline from ..util.logging import InvokeAILogger @@ -62,7 +62,6 @@ class ModelInstallList: class UserSelections(): install_models: List[str]= field(default_factory=list) remove_models: List[str]=field(default_factory=list) - purge_deleted_models: bool=field(default_factory=list) install_cn_models: List[str] = field(default_factory=list) remove_cn_models: List[str] = field(default_factory=list) install_lora_models: List[str] = field(default_factory=list) @@ -72,6 +71,64 @@ class UserSelections(): scan_directory: Path = None autoscan_on_startup: bool=False import_model_paths: str=None + +@dataclass +class ModelLoadInfo(): + name: str + model_type: ModelType + base_type: BaseModelType + path: Path = None + repo_id: str = None + description: str = '' + installed: bool = False + recommended: bool = False + +class ModelInstall(object): + def __init__(self,config:InvokeAIAppConfig): + self.config = config + self.mgr = ModelManager(config.model_conf_path) + self.datasets = OmegaConf.load(Dataset_path) + + def all_models(self)->Dict[str,ModelLoadInfo]: + ''' + Return dict of model_key=>ModelStatus + ''' + model_dict = dict() + # first populate with the entries in INITIAL_MODELS.yaml + for key, value in self.datasets.items(): + name,base,model_type = ModelManager.parse_key(key) + value['name'] = name + value['base_type'] = base + value['model_type'] = model_type + model_dict[key] = ModelLoadInfo(**value) + + # supplement with entries in models.yaml + installed_models = self.mgr.list_models() + for base in installed_models.keys(): + for model_type in installed_models[base].keys(): + for name, value in installed_models[base][model_type].items(): + key = ModelManager.create_key(name, base, model_type) + if key in model_dict: + 
model_dict[key].installed = True + else: + model_dict[key] = ModelLoadInfo( + name = name, + base_type = base, + model_type = model_type, + description = value.get('description'), + path = value.get('path'), + installed = True, + ) + return {x : model_dict[x] for x in sorted(model_dict.keys(),key=lambda y: model_dict[y].name.lower())} + + def starter_models(self)->Set[str]: + models = set() + for key, value in self.datasets.items(): + name,base,model_type = ModelManager.parse_key(key) + if model_type==ModelType.Pipeline: + models.add(key) + return models + def default_config_file(): return config.model_conf_path @@ -85,6 +142,15 @@ def initial_models(): return Datasets return (Datasets := OmegaConf.load(Dataset_path)['diffusers']) +def add_models(model_manager, config_file_path: Path, models: List[tuple[str,str,str]]): + print(f'Installing {models}') + +def del_models(model_manager, config_file_path: Path, models: List[tuple[str,str,str]]): + for base, model_type, name in models: + logger.info(f"Deleting {name}...") + model_manager.del_model(name, base, model_type) + model_manager.commit(config_file_path) + def install_requested_models( diffusers: ModelInstallList = None, controlnet: ModelInstallList = None, @@ -95,9 +161,8 @@ def install_requested_models( external_models: List[str] = None, scan_at_startup: bool = False, precision: str = "float16", - purge_deleted: bool = False, config_file_path: Path = None, - model_config_file_callback: Callable[[Path],Path] = None + model_config_file_callback: Callable[[Path],Path] = None, ): """ Entry point for installing/deleting starter models, or installing external models. @@ -110,40 +175,27 @@ def install_requested_models( # prevent circular import here from ..model_management import ModelManager model_manager = ModelManager(OmegaConf.load(config_file_path), precision=precision) - if controlnet: - model_manager.install_controlnet_models(controlnet.install_models, access_token=access_token) - model_manager.delete_controlnet_models(controlnet.remove_models) - if lora: - model_manager.install_lora_models(lora.install_models, access_token=access_token) - model_manager.delete_lora_models(lora.remove_models) + for x in [controlnet, lora, ti, diffusers]: + if x: + add_models(model_manager, config_file_path, x.install_models) + del_models(model_manager, config_file_path, x.remove_models) + + # if diffusers: - if ti: - model_manager.install_ti_models(ti.install_models, access_token=access_token) - model_manager.delete_ti_models(ti.remove_models) - - if diffusers: - # TODO: Replace next three paragraphs with calls into new model manager - if diffusers.remove_models and len(diffusers.remove_models) > 0: - logger.info("Processing requested deletions") - for model in diffusers.remove_models: - logger.info(f"{model}...") - model_manager.del_model(model, delete_files=purge_deleted) - model_manager.commit(config_file_path) - - if diffusers.install_models and len(diffusers.install_models) > 0: - logger.info("Installing requested models") - downloaded_paths = download_weight_datasets( - models=diffusers.install_models, - access_token=None, - precision=precision, - ) - successful = {x:v for x,v in downloaded_paths.items() if v is not None} - if len(successful) > 0: - update_config_file(successful, config_file_path) - if len(successful) < len(diffusers.install_models): - unsuccessful = [x for x in downloaded_paths if downloaded_paths[x] is None] - logger.warning(f"Some of the model downloads were not successful: {unsuccessful}") + # if diffusers.install_models and 
len(diffusers.install_models) > 0: + # logger.info("Installing requested models") + # downloaded_paths = download_weight_datasets( + # models=diffusers.install_models, + # access_token=None, + # precision=precision, + # ) + # successful = {x:v for x,v in downloaded_paths.items() if v is not None} + # if len(successful) > 0: + # update_config_file(successful, config_file_path) + # if len(successful) < len(diffusers.install_models): + # unsuccessful = [x for x in downloaded_paths if downloaded_paths[x] is None] + # logger.warning(f"Some of the model downloads were not successful: {unsuccessful}") # due to above, we have to reload the model manager because conf file # was changed behind its back @@ -156,8 +208,8 @@ def install_requested_models( if len(external_models) > 0: logger.info("INSTALLING EXTERNAL MODELS") for path_url_or_repo in external_models: + logger.debug(path_url_or_repo) try: - logger.debug(f'In install_requested_models; callback = {model_config_file_callback}') model_manager.heuristic_import( path_url_or_repo, commit_to_conf=config_file_path, @@ -280,21 +332,18 @@ def _download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path: # --------------------------------------------- def download_from_hf( - model_class: object, model_name: str, **kwargs + model_class: object, model_name: str, destination: Path, **kwargs ): logger = InvokeAILogger.getLogger('InvokeAI') logger.addFilter(lambda x: 'fp16 is not a valid' not in x.getMessage()) - path = config.cache_dir model = model_class.from_pretrained( model_name, - cache_dir=path, resume_download=True, **kwargs, ) - model_name = "--".join(("models", *model_name.split("/"))) - return path / model_name if model else None - + model.save_pretrained(destination, safe_serialization=True) + return destination def _download_diffusion_weights( mconfig: DictConfig, access_token: str, precision: str = "float32" diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index fd0141c3f7..79c6573f4f 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -305,7 +305,8 @@ class ModelManager(object): ) -> str: return f"{base_model}/{model_type}/{model_name}" - def parse_key(self, model_key: str) -> Tuple[str, BaseModelType, ModelType]: + @classmethod + def parse_key(cls, model_key: str) -> Tuple[str, BaseModelType, ModelType]: base_model_str, model_type_str, model_name = model_key.split('/', 2) try: model_type = ModelType(model_type_str) @@ -548,7 +549,7 @@ class ModelManager(object): line = f'{model_info["name"]:25s} {model_info["type"]:10s} {model_info["description"]}' print(line) - # TODO: test when ui implemented + # Tested - LS def del_model( self, model_name: str, @@ -558,7 +559,6 @@ class ModelManager(object): """ Delete the named model. 
""" - raise Exception("TODO: del_model") # TODO: redo model_key = self.create_key(model_name, base_model, model_type) model_cfg = self.models.pop(model_key, None) @@ -574,10 +574,11 @@ class ModelManager(object): self.cache.uncache_model(cache_id) # if model inside invoke models folder - delete files - if model_cfg.path.startswith("models/") or model_cfg.path.startswith("models\\"): - model_path = self.globals.root_dir / model_cfg.path - if model_path.isdir(): - shutil.rmtree(str(model_path)) + model_path = self.globals.root_path / model_cfg.path + + if model_path.is_relative_to(self.globals.models_path): + if model_path.is_dir(): + rmtree(str(model_path)) else: model_path.unlink() @@ -712,5 +713,5 @@ class ModelManager(object): self.models[model_key] = model_config new_models_found = True - if new_models_found: + if new_models_found and self.config_path: self.commit() diff --git a/invokeai/backend/model_management/models/lora.py b/invokeai/backend/model_management/models/lora.py index c69677fd0c..bcf3224ece 100644 --- a/invokeai/backend/model_management/models/lora.py +++ b/invokeai/backend/model_management/models/lora.py @@ -1,3 +1,4 @@ +import os import torch from typing import Optional, Union, Literal from .base import ( diff --git a/invokeai/configs/INITIAL_MODELS.yaml b/invokeai/configs/INITIAL_MODELS.yaml index 6bf3d4231a..ccb7ca09aa 100644 --- a/invokeai/configs/INITIAL_MODELS.yaml +++ b/invokeai/configs/INITIAL_MODELS.yaml @@ -1,107 +1,92 @@ # This file predefines a few models that the user may want to install. -diffusers: - stable-diffusion-1.5: - description: Stable Diffusion version 1.5 diffusers model (4.27 GB) - repo_id: runwayml/stable-diffusion-v1-5 - format: diffusers - vae: - repo_id: stabilityai/sd-vae-ft-mse - recommended: True - default: True - sd-inpainting-1.5: - description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB) - repo_id: runwayml/stable-diffusion-inpainting - format: diffusers - vae: - repo_id: stabilityai/sd-vae-ft-mse - recommended: True - stable-diffusion-2.1: - description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB) - repo_id: stabilityai/stable-diffusion-2-1 - format: diffusers - recommended: True - sd-inpainting-2.0: - description: Stable Diffusion version 2.0 inpainting model (5.21 GB) - repo_id: stabilityai/stable-diffusion-2-inpainting - format: diffusers - recommended: False - analog-diffusion-1.0: - description: An SD-1.5 model trained on diverse analog photographs (2.13 GB) - repo_id: wavymulder/Analog-Diffusion - format: diffusers - recommended: false - deliberate-1.0: - description: Versatile model that produces detailed images up to 768px (4.27 GB) - format: diffusers - repo_id: XpucT/Deliberate - recommended: False - d&d-diffusion-1.0: - description: Dungeons & Dragons characters (2.13 GB) - format: diffusers - repo_id: 0xJustin/Dungeons-and-Diffusion - recommended: False - dreamlike-photoreal-2.0: - description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB) - format: diffusers - repo_id: dreamlike-art/dreamlike-photoreal-2.0 - recommended: False - inkpunk-1.0: - description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB) - format: diffusers - repo_id: Envvi/Inkpunk-Diffusion - recommended: False - openjourney-4.0: - description: An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB) - format: diffusers - repo_id: prompthero/openjourney - vae: - repo_id: 
stabilityai/sd-vae-ft-mse - recommended: False - portrait-plus-1.0: - description: An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB) - format: diffusers - repo_id: wavymulder/portraitplus - recommended: False - seek-art-mega-1.0: - description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB) - repo_id: coreco/seek.art_MEGA - format: diffusers - vae: - repo_id: stabilityai/sd-vae-ft-mse - recommended: False - trinart-2.0: - description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB) - repo_id: naclbit/trinart_stable_diffusion_v2 - format: diffusers - vae: - repo_id: stabilityai/sd-vae-ft-mse - recommended: False - waifu-diffusion-1.4: - description: An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB) - repo_id: hakurei/waifu-diffusion - format: diffusers - vae: - repo_id: stabilityai/sd-vae-ft-mse - recommended: False -controlnet: - canny: lllyasviel/control_v11p_sd15_canny - inpaint: lllyasviel/control_v11p_sd15_inpaint - mlsd: lllyasviel/control_v11p_sd15_mlsd - depth: lllyasviel/control_v11f1p_sd15_depth - normal_bae: lllyasviel/control_v11p_sd15_normalbae - seg: lllyasviel/control_v11p_sd15_seg - lineart: lllyasviel/control_v11p_sd15_lineart - lineart_anime: lllyasviel/control_v11p_sd15s2_lineart_anime - scribble: lllyasviel/control_v11p_sd15_scribble - softedge: lllyasviel/control_v11p_sd15_softedge - shuffle: lllyasviel/control_v11e_sd15_shuffle - tile: lllyasviel/control_v11f1e_sd15_tile - ip2p: lllyasviel/control_v11e_sd15_ip2p -textual_inversion: - 'EasyNegative': https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors - 'ahx-beta-453407d': sd-concepts-library/ahx-beta-453407d -lora: - 'LowRA': https://civitai.com/api/download/models/63006 - 'Ink scenery': https://civitai.com/api/download/models/83390 - 'sd-model-finetuned-lora-t4': sayakpaul/sd-model-finetuned-lora-t4 - +sd-1/pipeline/stable-diffusion-v1-5: + description: Stable Diffusion version 1.5 diffusers model (4.27 GB) + repo_id: runwayml/stable-diffusion-v1-5 + recommended: True +sd-1/pipeline/stable-diffusion-inpainting: + description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB) + repo_id: runwayml/stable-diffusion-inpainting + recommended: True +sd-2/pipeline/stable-diffusion-2-1: + description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB) + repo_id: stabilityai/stable-diffusion-2-1 + recommended: True +sd-2/pipeline/stable-diffusion-2-inpainting: + description: Stable Diffusion version 2.0 inpainting model (5.21 GB) + repo_id: stabilityai/stable-diffusion-2-inpainting + recommended: False +sd-1/pipeline/Analog-Diffusion: + description: An SD-1.5 model trained on diverse analog photographs (2.13 GB) + repo_id: wavymulder/Analog-Diffusion + recommended: false +sd-1/pipeline/Deliberate: + description: Versatile model that produces detailed images up to 768px (4.27 GB) + repo_id: XpucT/Deliberate + recommended: False +sd-1/pipeline/Dungeons-and-Diffusion: + description: Dungeons & Dragons characters (2.13 GB) + repo_id: 0xJustin/Dungeons-and-Diffusion + recommended: False +sd-1/pipeline/dreamlike-photoreal-2.0: + description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB) + repo_id: dreamlike-art/dreamlike-photoreal-2.0 + recommended: False +sd-1/pipeline/Inkpunk-Diffusion: + description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with 
"nvinkpunk" (4.27 GB) + repo_id: Envvi/Inkpunk-Diffusion + recommended: False +sd-1/pipeline/openjourney: + description: An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB) + repo_id: prompthero/openjourney + recommended: False +sd-1/pipeline/portraitplus: + description: An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB) + repo_id: wavymulder/portraitplus + recommended: False +sd-1/pipeline/seek.art_MEGA: + description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB) + recommended: False +sd-1/pipeline/trinart_stable_diffusion_v2: + description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB) + repo_id: naclbit/trinart_stable_diffusion_v2 + recommended: False +sd-1/pipeline/waifu-diffusion: + description: An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB) + repo_id: hakurei/waifu-diffusion + recommended: False +sd-1/controlnet/canny: + repo_id: lllyasviel/control_v11p_sd15_canny +sd-1/controlnet/inpaint: + repo_id: lllyasviel/control_v11p_sd15_inpaint +sd-1/controlnet/mlsd: + repo_id: lllyasviel/control_v11p_sd15_mlsd +sd-1/controlnet/depth: + repo_id: lllyasviel/control_v11f1p_sd15_depth +sd-1/controlnet/normal_bae: + repo_id: lllyasviel/control_v11p_sd15_normalbae +sd-1/controlnet/seg: + repo_id: lllyasviel/control_v11p_sd15_seg +sd-1/controlnet/lineart: + repo_id: lllyasviel/control_v11p_sd15_lineart +sd-1/controlnet/lineart_anime: + repo_id: lllyasviel/control_v11p_sd15s2_lineart_anime +sd-1/controlnet/scribble: + repo_id: lllyasviel/control_v11p_sd15_scribble +sd-1/controlnet/softedge: + repo_id: lllyasviel/control_v11p_sd15_softedge +sd-1/controlnet/shuffle: + repo_id: lllyasviel/control_v11e_sd15_shuffle +sd-1/controlnet/tile: + repo_id: lllyasviel/control_v11f1e_sd15_tile +sd-1/controlnet/ip2p: + repo_id: lllyasviel/control_v11e_sd15_ip2p +sd-1/embedding/EasyNegative: + path: https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors +sd-1/embedding/ahx-beta-453407d: + repo_id: sd-concepts-library/ahx-beta-453407d +sd-1/lora/LowRA: + path: https://civitai.com/api/download/models/63006 +sd-1/lora/Ink Scenery: + path: https://civitai.com/api/download/models/83390 +sd-1/lora/sd-model-finetuned-lora-t4: + repo_id: sayakpaul/sd-model-finetuned-lora-t4 diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index 265c456e3a..1753364f64 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -31,17 +31,17 @@ from omegaconf import OmegaConf from invokeai.backend.util.logging import InvokeAILogger from invokeai.backend.install.model_install_backend import ( - Dataset_path, + Dataset_path, # most of these should go!! 
default_config_file, default_dataset, install_requested_models, recommended_datasets, ModelInstallList, UserSelections, + ModelInstall ) -from invokeai.backend import ModelManager +from invokeai.backend.model_management import ModelManager, BaseModelType, ModelType from invokeai.backend.util import choose_precision, choose_torch_device -from invokeai.backend.util.logging import InvokeAILogger from invokeai.frontend.install.widgets import ( CenteredTitleText, MultiSelectColumns, @@ -58,6 +58,7 @@ from invokeai.frontend.install.widgets import ( from invokeai.app.services.config import InvokeAIAppConfig config = InvokeAIAppConfig.get_config() +logger = InvokeAILogger.getLogger() # build a table mapping all non-printable characters to None # for stripping control characters @@ -90,25 +91,10 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): if not config.model_conf_path.exists(): with open(config.model_conf_path,'w') as file: print('# InvokeAI model configuration file',file=file) - model_manager = ModelManager(config.model_conf_path) - - self.starter_models = OmegaConf.load(Dataset_path)['diffusers'] - self.installed_diffusers_models = self.list_additional_diffusers_models( - model_manager, - self.starter_models, - ) - self.installed_cn_models = model_manager.list_controlnet_models() - self.installed_lora_models = model_manager.list_lora_models() - self.installed_ti_models = model_manager.list_ti_models() - - try: - self.existing_models = OmegaConf.load(default_config_file()) - except: - self.existing_models = dict() - - self.starter_model_list = list(self.starter_models.keys()) - self.installed_models = dict() - + self.installer = ModelInstall(config) + self.all_models = self.installer.all_models() + self.starter_models = self.installer.starter_models() + self.model_labels = self._get_model_labels() window_width, window_height = get_terminal_size() self.nextrely -= 1 @@ -141,39 +127,36 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): scroll_exit = True, ) self.tabs.on_changed = self._toggle_tables - + top_of_table = self.nextrely - self.starter_diffusers_models = self.add_starter_diffusers() + self.starter_pipelines = self.add_starter_pipelines() bottom_of_table = self.nextrely self.nextrely = top_of_table - self.diffusers_models = self.add_diffusers_widgets( - predefined_models=self.installed_diffusers_models, - model_type='Diffusers', + self.pipeline_models = self.add_model_widgets( + model_type=ModelType.Pipeline, window_width=window_width, + exclude = self.starter_models ) bottom_of_table = max(bottom_of_table,self.nextrely) self.nextrely = top_of_table self.controlnet_models = self.add_model_widgets( - predefined_models=self.installed_cn_models, - model_type='ControlNet', + model_type=ModelType.ControlNet, window_width=window_width, ) bottom_of_table = max(bottom_of_table,self.nextrely) self.nextrely = top_of_table self.lora_models = self.add_model_widgets( - predefined_models=self.installed_lora_models, - model_type="LoRA/LyCORIS", + model_type=ModelType.Lora, window_width=window_width, ) bottom_of_table = max(bottom_of_table,self.nextrely) self.nextrely = top_of_table self.ti_models = self.add_model_widgets( - predefined_models=self.installed_ti_models, - model_type="Textual Inversion Embeddings", + model_type=ModelType.TextualInversion, window_width=window_width, ) bottom_of_table = max(bottom_of_table,self.nextrely) @@ -220,18 +203,20 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): self._toggle_tables([self.current_tab]) ############# diffusers 
tab ########## - def add_starter_diffusers(self)->dict[str, npyscreen.widget]: + def add_starter_pipelines(self)->dict[str, npyscreen.widget]: '''Add widgets responsible for selecting diffusers models''' widgets = dict() - - starter_model_labels = self._get_starter_model_labels() - recommended_models = [ + models = self.all_models + starters = self.starter_models + starter_model_labels = self.model_labels + + recommended_models = set([ x - for x in self.starter_model_list - if self.starter_models[x].get("recommended", False) - ] + for x in starters + if models[x].recommended + ]) self.installed_models = sorted( - [x for x in list(self.starter_models.keys()) if x in self.existing_models] + [x for x in starters if models[x].installed] ) widgets.update( @@ -246,55 +231,46 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): self.nextrely -= 1 # if user has already installed some initial models, then don't patronize them # by showing more recommendations - show_recommended = not self.existing_models + show_recommended = len(self.installed_models)==0 + keys = [x for x in models.keys() if x in starters] widgets.update( models_selected = self.add_widget_intelligent( MultiSelectColumns, columns=1, name="Install Starter Models", - values=starter_model_labels, + values=[starter_model_labels[x] for x in keys], value=[ - self.starter_model_list.index(x) - for x in self.starter_model_list - if (show_recommended and x in recommended_models)\ - or (x in self.existing_models) + keys.index(x) + for x in keys + if (show_recommended and models[x].recommended) \ + or (x in self.installed_models) ], - max_height=len(starter_model_labels) + 1, + max_height=len(starters) + 1, relx=4, scroll_exit=True, - ) + ), + models = keys, ) - widgets.update( - purge_deleted = self.add_widget_intelligent( - npyscreen.Checkbox, - name="Purge unchecked diffusers models from disk", - value=False, - scroll_exit=True, - relx=4, - ) - ) - widgets['purge_deleted'].when_value_edited = lambda: self.sync_purge_buttons(widgets['purge_deleted']) - self.nextrely += 1 return widgets ############# Add a set of model install widgets ######## def add_model_widgets(self, - predefined_models: dict[str,bool], - model_type: str, + model_type: ModelType, window_width: int=120, install_prompt: str=None, - add_purge_deleted: bool=False, + exclude: set=set(), )->dict[str,npyscreen.widget]: '''Generic code to create model selection widgets''' widgets = dict() - model_list = sorted(predefined_models.keys()) + model_list = [x for x in self.all_models if self.all_models[x].model_type==model_type and not x in exclude] + model_labels = [self.model_labels[x] for x in model_list] if len(model_list) > 0: - max_width = max([len(x) for x in model_list]) + max_width = max([len(x) for x in model_labels]) columns = window_width // (max_width+8) # 8 characters for "[x] " and padding columns = min(len(model_list),columns) or 1 - prompt = install_prompt or f"Select the desired {model_type} models to install. Unchecked models will be purged from disk." + prompt = install_prompt or f"Select the desired {model_type.value.title()} models to install. Unchecked models will be purged from disk." 
widgets.update( label1 = self.add_widget_intelligent( @@ -310,31 +286,19 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): MultiSelectColumns, columns=columns, name=f"Install {model_type} Models", - values=model_list, + values=model_labels, value=[ model_list.index(x) for x in model_list - if predefined_models[x] + if self.all_models[x].installed ], max_height=len(model_list)//columns + 1, relx=4, scroll_exit=True, - ) + ), + models = model_list, ) - if add_purge_deleted: - self.nextrely += 1 - widgets.update( - purge_deleted = self.add_widget_intelligent( - npyscreen.Checkbox, - name="Purge unchecked diffusers models from disk", - value=False, - scroll_exit=True, - relx=4, - ) - ) - widgets['purge_deleted'].when_value_edited = lambda: self.sync_purge_buttons(widgets['purge_deleted']) - self.nextrely += 1 widgets.update( download_ids = self.add_widget_intelligent( @@ -349,18 +313,15 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): ### Tab for arbitrary diffusers widgets ### def add_diffusers_widgets(self, - predefined_models: dict[str,bool], - model_type: str='Diffusers', + model_type: ModelType=ModelType.Pipeline, window_width: int=120, )->dict[str,npyscreen.widget]: '''Similar to add_model_widgets() but adds some additional widgets at the bottom to support the autoload directory''' widgets = self.add_model_widgets( - predefined_models, - 'Diffusers', - window_width, - install_prompt="Additional diffusers models already installed.", - add_purge_deleted=True + model_type = model_type, + window_width = window_width, + install_prompt=f"Additional {model_type.value.title()} models already installed.", ) label = "Directory to scan for models to automatically import ( autocompletes):" @@ -390,21 +351,17 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): ) return widgets - def sync_purge_buttons(self,checkbox): - value = checkbox.value - self.starter_diffusers_models['purge_deleted'].value = value - self.diffusers_models['purge_deleted'].value = value - def resize(self): super().resize() - if (s := self.starter_diffusers_models.get("models_selected")): - s.values = self._get_starter_model_labels() + if (s := self.starter_pipelines.get("models_selected")): + keys = [x for x in self.all_models.keys() if x in self.starter_models] + s.values = [self.model_labels[x] for x in keys] def _toggle_tables(self, value=None): selected_tab = value[0] widgets = [ - self.starter_diffusers_models, - self.diffusers_models, + self.starter_pipelines, + self.pipeline_models, self.controlnet_models, self.lora_models, self.ti_models, @@ -412,34 +369,38 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): for group in widgets: for k,v in group.items(): - v.hidden = True - v.editable = False + try: + v.hidden = True + v.editable = False + except: + pass for k,v in widgets[selected_tab].items(): - v.hidden = False - if not isinstance(v,(npyscreen.FixedText, npyscreen.TitleFixedText, CenteredTitleText)): - v.editable = True + try: + v.hidden = False + if not isinstance(v,(npyscreen.FixedText, npyscreen.TitleFixedText, CenteredTitleText)): + v.editable = True + except: + pass self.__class__.current_tab = selected_tab # for persistence self.display() - def _get_starter_model_labels(self) -> List[str]: + def _get_model_labels(self) -> dict[str,str]: window_width, window_height = get_terminal_size() - label_width = 25 checkbox_width = 4 spacing_width = 2 + + models = self.all_models + label_width = max([len(models[x].name) for x in models]) description_width = window_width - 
label_width - checkbox_width - spacing_width - im = self.starter_models - names = self.starter_model_list - descriptions = [ - im[x].description[0 : description_width - 3] + "..." - if len(im[x].description) > description_width - else im[x].description - for x in names - ] - return [ - f"%-{label_width}s %s" % (names[x], descriptions[x]) - for x in range(0, len(names)) - ] + result = dict() + for x in models.keys(): + description = models[x].description + description = description[0 : description_width - 3] + "..." \ + if description and len(description) > description_width \ + else description if description else '' + result[x] = f"%-{label_width}s %s" % (models[x].name, description) + return result def _get_columns(self) -> int: window_width, window_height = get_terminal_size() @@ -548,8 +509,8 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): # rebuild the form, saving and restoring some of the fields that need to be preserved. saved_messages = self.monitor.entry_widget.values - autoload_dir = self.diffusers_models['autoload_directory'].value - autoscan = self.diffusers_models['autoscan_on_startup'].value + autoload_dir = self.pipeline_models['autoload_directory'].value + autoscan = self.pipeline_models['autoscan_on_startup'].value app.main_form = app.addForm( "MAIN", addModelsForm, name="Install Stable Diffusion Models", multipage=self.multipage, @@ -558,23 +519,8 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): app.main_form.monitor.entry_widget.values = saved_messages app.main_form.monitor.entry_widget.buffer([''],scroll_end=True) - app.main_form.diffusers_models['autoload_directory'].value = autoload_dir - app.main_form.diffusers_models['autoscan_on_startup'].value = autoscan - - ############################################################### - - def list_additional_diffusers_models(self, - manager: ModelManager, - starters:dict - )->dict[str,bool]: - '''Return a dict of all the currently installed models that are not on the starter list''' - model_info = manager.list_models() - additional_models = { - x:True for x in model_info \ - if model_info[x]['format']=='diffusers' \ - and x not in starters - } - return additional_models + app.main_form.pipeline_models['autoload_directory'].value = autoload_dir + app.main_form.pipeline_models['autoscan_on_startup'].value = autoscan def marshall_arguments(self): """ @@ -591,24 +537,20 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): selections = self.parentApp.user_selections # Starter models to install/remove - starter_models = dict( - map( - lambda x: (self.starter_model_list[x], True), - self.starter_diffusers_models['models_selected'].value, - ) - ) - selections.purge_deleted_models = self.starter_diffusers_models['purge_deleted'].value or \ - self.diffusers_models['purge_deleted'].value - - selections.install_models = [x for x in starter_models if x not in self.existing_models] - selections.remove_models = [x for x in self.starter_model_list if x in self.existing_models and x not in starter_models] - + # TO DO - turn these into a dict so we don't have to hard-code the attributes + print(f'installed={[x for x in self.all_models if self.all_models[x].installed]}',file=f) + for section in [self.starter_pipelines, self.pipeline_models, + self.controlnet_models, self.lora_models, self.ti_models]: + selected = set([section['models'][x] for x in section['models_selected'].value]) + models_to_install = [x for x in selected if not self.all_models[x].installed] + models_to_remove = [x for x in section['models'] 
if x not in selected and self.all_models[x].installed] + # "More" models - selections.import_model_paths = self.diffusers_models['download_ids'].value.split() - if diffusers_selected := self.diffusers_models.get('models_selected'): + selections.import_model_paths = self.pipeline_models['download_ids'].value.split() + if diffusers_selected := self.pipeline_models.get('models_selected'): selections.remove_models.extend([x for x in diffusers_selected.values - if self.installed_diffusers_models[x] + if self.installed_pipeline_models[x] and diffusers_selected.values.index(x) not in diffusers_selected.value ] ) @@ -659,9 +601,8 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): selections.install_ti_models.extend(additional_tis) # load directory and whether to scan on startup - selections.scan_directory = self.diffusers_models['autoload_directory'].value - selections.autoscan_on_startup = self.diffusers_models['autoscan_on_startup'].value - + selections.scan_directory = self.pipeline_models['autoload_directory'].value + selections.autoscan_on_startup = self.pipeline_models['autoscan_on_startup'].value class AddModelApplication(npyscreen.NPSAppManaged): def __init__(self,opt): @@ -761,19 +702,19 @@ def process_and_execute(opt: Namespace, directory_to_scan = selections.scan_directory scan_at_startup = selections.autoscan_on_startup potential_models_to_install = selections.import_model_paths + name_map = selections.model_name_map install_requested_models( - diffusers = ModelInstallList(models_to_install, models_to_remove), - controlnet = ModelInstallList(selections.install_cn_models, selections.remove_cn_models), - lora = ModelInstallList(selections.install_lora_models, selections.remove_lora_models), - ti = ModelInstallList(selections.install_ti_models, selections.remove_ti_models), + diffusers = ModelInstallList(models_to_install, [name_map[ModelType.Pipeline][x] for x in models_to_remove]), + controlnet = ModelInstallList(selections.install_cn_models, [name_map[ModelType.ControlNet][x] for x in selections.remove_cn_models]), + lora = ModelInstallList(selections.install_lora_models, [name_map[ModelType.Lora][x] for x in selections.remove_lora_models]), + ti = ModelInstallList(selections.install_ti_models, [name_map[ModelType.TextualInversion][x] for x in selections.remove_ti_models]), scan_directory=Path(directory_to_scan) if directory_to_scan else None, external_models=potential_models_to_install, scan_at_startup=scan_at_startup, precision="float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device())), - purge_deleted=selections.purge_deleted_models, config_file_path=Path(opt.config_file) if opt.config_file else config.model_conf_path, model_config_file_callback = lambda x: ask_user_for_config_file(x,conn_out) ) @@ -828,7 +769,6 @@ def select_and_download_models(opt: Namespace): ti=ModelInstallList(**{action:opt.textual_inversions or []}), lora=ModelInstallList(**{action:opt.loras or []}), precision=precision, - purge_deleted=True, model_config_file_callback=lambda x: ask_user_for_config_file(x), ) elif opt.default_only: diff --git a/scripts/migrate_models_to_3.0.py b/scripts/migrate_models_to_3.0.py index 96bf362341..2d498df237 100644 --- a/scripts/migrate_models_to_3.0.py +++ b/scripts/migrate_models_to_3.0.py @@ -24,14 +24,32 @@ from transformers import ( ) import invokeai.backend.util.logging as logger +from invokeai.backend.model_management import ModelManager from invokeai.backend.model_management.model_probe import ( - ModelProbe, ModelType, 
BaseModelType + ModelProbe, ModelType, BaseModelType, SchedulerPredictionType, ModelVariantInfo ) warnings.filterwarnings("ignore") transformers.logging.set_verbosity_error() diffusers.logging.set_verbosity_error() +model_names = set() + +def unique_name(name,info)->str: + done = False + key = ModelManager.create_key(name,info.base_type,info.model_type) + unique_name = key + counter = 1 + while not done: + if unique_name in model_names: + unique_name = f'{key}-{counter:0>2d}' + counter += 1 + else: + done = True + model_names.add(unique_name) + name,_,_ = ModelManager.parse_key(unique_name) + return name + def create_directory_structure(dest: Path): for model_base in [BaseModelType.StableDiffusion1,BaseModelType.StableDiffusion2]: for model_type in [ModelType.Pipeline, ModelType.Vae, ModelType.Lora, @@ -113,10 +131,10 @@ def migrate_conversion_models(dest_directory: Path): # sd-1 repo_id = 'openai/clip-vit-large-patch14' pipeline = CLIPTokenizer.from_pretrained(repo_id, **kwargs) - pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14', safe_serialization=True) + pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14' / 'tokenizer', safe_serialization=True) pipeline = CLIPTextModel.from_pretrained(repo_id, **kwargs) - pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14', safe_serialization=True) + pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14' / 'text_encoder', safe_serialization=True) # sd-2 repo_id = "stabilityai/stable-diffusion-2" @@ -153,12 +171,48 @@ def migrate_tuning_models(dest: Path): logger.info(f'Scanning {subdir}') migrate_models(src, dest) +def write_yaml(model_name: str, path:Path, info:ModelVariantInfo, dest_yaml: io.TextIOBase): + name = unique_name(model_name, info) + stanza = { + f'{info.base_type.value}/{info.model_type.value}/{name}': { + 'name': model_name, + 'path': str(path), + 'description': f'diffusers model {model_name}', + 'format': 'diffusers', + 'image_size': info.image_size, + 'base': info.base_type.value, + 'variant': info.variant_type.value, + 'prediction_type': info.prediction_type.value, + 'upcast_attention': info.prediction_type == SchedulerPredictionType.VPrediction + } + } + dest_yaml.write(yaml.dump(stanza)) + dest_yaml.flush() + +def migrate_converted(dest_dir: Path, dest_yaml: io.TextIOBase): + for sub_dir in [Path('./models/converted_ckpts'),Path('./models/optimize-ckpts')]: + for model in sub_dir.glob('*'): + if not model.is_dir(): + continue + info = ModelProbe().heuristic_probe(model) + if not info: + continue + dest = Path(dest_dir, info.base_type.value, info.model_type.value, model.name) + try: + copy_dir(model,dest) + rel_path = Path('models',dest.relative_to(dest_dir)) + write_yaml(model.name,path=rel_path,info=info, dest_yaml=dest_yaml) + except KeyboardInterrupt: + raise + except Exception as e: + logger.warning(f'Could not migrate the converted diffusers {model.name}: {str(e)}. 
Skipping.') + def migrate_pipelines(dest_dir: Path, dest_yaml: io.TextIOBase): cache = Path('./models/hub') kwargs = dict( cache_dir = cache, - local_files_only = True, safety_checker = None, + # local_files_only = True, ) for model in cache.glob('models--*'): if len(list(model.glob('snapshots/**/model_index.json')))==0: @@ -166,38 +220,26 @@ def migrate_pipelines(dest_dir: Path, dest_yaml: io.TextIOBase): _,owner,repo_name=model.name.split('--') repo_id = f'{owner}/{repo_name}' revisions = [x.name for x in model.glob('refs/*')] - for revision in revisions: - logger.info(f'Migrating {repo_id}, revision {revision}') - try: - pipeline = StableDiffusionPipeline.from_pretrained( - repo_id, - revision=revision, - **kwargs) - info = ModelProbe().heuristic_probe(pipeline) - if not info: - continue - dest = Path(dest_dir, info.base_type.value, info.model_type.value, f'{repo_name}-{revision}') - pipeline.save_pretrained(dest, safe_serialization=True) - rel_path = Path('models',dest.relative_to(dest_dir)) - stanza = { - f'{info.base_type.value}/{info.model_type.value}/{repo_name}-{revision}': - { - 'name': repo_name, - 'path': str(rel_path), - 'description': f'diffusers model {repo_id}', - 'format': 'diffusers', - 'image_size': info.image_size, - 'base': info.base_type.value, - 'variant': info.variant_type.value, - 'prediction_type': info.prediction_type.value, - } - } - print(yaml.dump(stanza),file=dest_yaml,end="") - dest_yaml.flush() - except KeyboardInterrupt: - raise - except Exception as e: - logger.warning(f'Could not load the "{revision}" version of {repo_id}. Skipping.') + + # if an fp16 is available we use that + revision = 'fp16' if len(revisions) > 1 and 'fp16' in revisions else revisions[0] + logger.info(f'Migrating {repo_id}, revision {revision}') + try: + pipeline = StableDiffusionPipeline.from_pretrained( + repo_id, + revision=revision, + **kwargs) + info = ModelProbe().heuristic_probe(pipeline) + if not info: + continue + dest = Path(dest_dir, info.base_type.value, info.model_type.value, f'{repo_name}') + pipeline.save_pretrained(dest, safe_serialization=True) + rel_path = Path('models',dest.relative_to(dest_dir)) + write_yaml(repo_name, path=rel_path, info=info, dest_yaml=dest_yaml) + except KeyboardInterrupt: + raise + except Exception as e: + logger.warning(f'Could not load the "{revision}" version of {repo_id}. 
Skipping.') def migrate_checkpoints(dest_dir: Path, dest_yaml: io.TextIOBase): # find any checkpoints referred to in old models.yaml @@ -218,6 +260,7 @@ def migrate_checkpoints(dest_dir: Path, dest_yaml: io.TextIOBase): dest = Path(dest_dir, info.base_type.value, info.model_type.value,weights.name) copy_file(weights,dest) weights = Path('models', info.base_type.value, info.model_type.value,weights.name) + model_name = unique_name(model_name, info) stanza = { f'{info.base_type.value}/{info.model_type.value}/{model_name}': { @@ -261,15 +304,16 @@ def main(): os.chdir(root_directory) with open(dest_yaml,'w') as yaml_file: - print(yaml.dump({'__metadata__': - {'version':'3.0.0'} - } - ),file=yaml_file,end="" - ) + yaml_file.write(yaml.dump({'__metadata__': + {'version':'3.0.0'} + } + ) + ) create_directory_structure(dest_directory) migrate_support_models(dest_directory) migrate_conversion_models(dest_directory) migrate_tuning_models(dest_directory) + migrate_converted(dest_directory,yaml_file) migrate_pipelines(dest_directory,yaml_file) migrate_checkpoints(dest_directory,yaml_file) From f28d50070e14374946dfc335d08cc75220ce4bbe Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 16 Jun 2023 22:54:36 -0400 Subject: [PATCH 02/47] configure/install basically working; needs edge case testing --- .../backend/install/invokeai_configure.py | 33 +- .../backend/install/model_install_backend.py | 618 ++++++++---------- invokeai/backend/model_management/__init__.py | 1 + .../backend/model_management/model_manager.py | 7 +- .../backend/model_management/model_probe.py | 34 +- .../backend/model_management/models/base.py | 1 - invokeai/configs/INITIAL_MODELS.yaml | 3 +- .../v2-inpainting-inference-v.yaml | 159 +++++ .../v2-inpainting-inference.yaml | 158 +++++ invokeai/frontend/install/model_install.py | 238 ++----- invokeai/frontend/install/widgets.py | 29 +- scripts/migrate_models_to_3.0.py | 8 +- 12 files changed, 701 insertions(+), 588 deletions(-) create mode 100644 invokeai/configs/stable-diffusion/v2-inpainting-inference-v.yaml create mode 100644 invokeai/configs/stable-diffusion/v2-inpainting-inference.yaml diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py index cdb3f47755..582b24cbfa 100755 --- a/invokeai/backend/install/invokeai_configure.py +++ b/invokeai/backend/install/invokeai_configure.py @@ -56,11 +56,10 @@ from invokeai.frontend.install.widgets import ( ) from invokeai.backend.install.legacy_arg_parsing import legacy_parser from invokeai.backend.install.model_install_backend import ( - default_dataset, - download_from_hf, + hf_download_from_pretrained, hf_download_with_resume, - recommended_datasets, - UserSelections, + InstallSelections, + ModelInstall, ) from invokeai.backend.model_management.model_probe import ( ModelProbe, ModelType, BaseModelType, SchedulerPredictionType @@ -198,8 +197,8 @@ def download_conversion_models(): # sd-1 repo_id = 'openai/clip-vit-large-patch14' - download_from_hf(CLIPTokenizer, repo_id, target_dir / 'clip-vit-large-patch14') - download_from_hf(CLIPTextModel, repo_id, target_dir / 'clip-vit-large-patch14') + hf_download_from_pretrained(CLIPTokenizer, repo_id, target_dir / 'clip-vit-large-patch14') + hf_download_from_pretrained(CLIPTextModel, repo_id, target_dir / 'clip-vit-large-patch14') # sd-2 repo_id = "stabilityai/stable-diffusion-2" @@ -275,8 +274,8 @@ def download_clipseg(): logger.info("Installing clipseg model for text-based masking...") CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined" try: - 
download_from_hf(AutoProcessor, CLIPSEG_MODEL, config.root_path / 'models/core/misc/clipseg') - download_from_hf(CLIPSegForImageSegmentation, CLIPSEG_MODEL,'models/core/misc/clipseg') + hf_download_from_pretrained(AutoProcessor, CLIPSEG_MODEL, config.root_path / 'models/core/misc/clipseg') + hf_download_from_pretrained(CLIPSegForImageSegmentation, CLIPSEG_MODEL, config.root_path / 'models/core/misc/clipseg') except Exception: logger.info("Error installing clipseg model:") logger.info(traceback.format_exc()) @@ -592,7 +591,7 @@ class EditOptApplication(npyscreen.NPSAppManaged): self.program_opts = program_opts self.invokeai_opts = invokeai_opts self.user_cancelled = False - self.user_selections = default_user_selections(program_opts) + self.install_selections = default_user_selections(program_opts) def onStart(self): npyscreen.setTheme(npyscreen.Themes.DefaultTheme) @@ -627,19 +626,19 @@ def default_startup_options(init_file: Path) -> Namespace: opts.nsfw_checker = True return opts -def default_user_selections(program_opts: Namespace) -> UserSelections: - return UserSelections( - install_models=default_dataset() +def default_user_selections(program_opts: Namespace) -> InstallSelections: + installer = ModelInstall(config) + models = installer.all_models() + return InstallSelections( + install_models=[models[installer.default_model()].path or models[installer.default_model()].repo_id] if program_opts.default_only - else recommended_datasets() + else [models[x].path or models[x].repo_id for x in installer.recommended_models()] if program_opts.yes_to_all - else dict(), - purge_deleted_models=False, + else list(), scan_directory=None, autoscan_on_startup=None, ) - # ------------------------------------- def initialize_rootdir(root: Path, yes_to_all: bool = False): logger.info("** INITIALIZING INVOKEAI RUNTIME DIRECTORY **") @@ -696,7 +695,7 @@ def run_console_ui( if editApp.user_cancelled: return (None, None) else: - return (editApp.new_opts, editApp.user_selections) + return (editApp.new_opts, editApp.install_selections) # ------------------------------------- diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index 60f2d89748..54e5cdc1d8 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -2,18 +2,18 @@ Utility (backend) functions used by model_install.py """ import os -import re import shutil import sys +import traceback import warnings from dataclasses import dataclass,field from pathlib import Path -from tempfile import TemporaryFile -from typing import List, Dict, Set, Callable +from tempfile import TemporaryDirectory +from typing import List, Dict, Callable, Union, Set import requests -from diffusers import AutoencoderKL -from huggingface_hub import hf_hub_url, HfFolder +from diffusers import AutoencoderKL, StableDiffusionPipeline +from huggingface_hub import hf_hub_url, HfFolder, HfApi from omegaconf import OmegaConf from omegaconf.dictconfig import DictConfig from tqdm import tqdm @@ -21,7 +21,9 @@ from tqdm import tqdm import invokeai.configs as configs from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.backend.model_management import ModelManager, ModelType, BaseModelType +from invokeai.backend.model_management import ModelManager, ModelType, BaseModelType, ModelVariantType +from invokeai.backend.model_management.model_probe import ModelProbe, SchedulerPredictionType, ModelProbeInfo +from invokeai.backend.util import 
download_with_resume from ..stable_diffusion import StableDiffusionGeneratorPipeline from ..util.logging import InvokeAILogger @@ -29,19 +31,11 @@ warnings.filterwarnings("ignore") # --------------------------globals----------------------- config = InvokeAIAppConfig.get_config() - -Model_dir = "models" -Weights_dir = "ldm/stable-diffusion-v1/" +logger = InvokeAILogger.getLogger(name='InvokeAI') # the initial "configs" dir is now bundled in the `invokeai.configs` package Dataset_path = Path(configs.__path__[0]) / "INITIAL_MODELS.yaml" -# initial models omegaconf -Datasets = None - -# logger -logger = InvokeAILogger.getLogger(name='InvokeAI') - Config_preamble = """ # This file describes the alternative machine learning models # available to InvokeAI script. @@ -52,6 +46,24 @@ Config_preamble = """ # was trained on. """ +LEGACY_CONFIGS = { + BaseModelType.StableDiffusion1: { + ModelVariantType.Normal: 'v1-inference.yaml', + ModelVariantType.Inpaint: 'v1-inpainting-inference.yaml', + }, + + BaseModelType.StableDiffusion2: { + ModelVariantType.Normal: { + SchedulerPredictionType.Epsilon: 'v2-inference.yaml', + SchedulerPredictionType.VPrediction: 'v2-inference-v.yaml', + }, + ModelVariantType.Inpaint: { + SchedulerPredictionType.Epsilon: 'v2-inpainting-inference.yaml', + SchedulerPredictionType.VPrediction: 'v2-inpainting-inference-v.yaml', + } + } +} + @dataclass class ModelInstallList: '''Class for listing models to be installed/removed''' @@ -59,18 +71,11 @@ class ModelInstallList: remove_models: List[str] = field(default_factory=list) @dataclass -class UserSelections(): +class InstallSelections(): install_models: List[str]= field(default_factory=list) remove_models: List[str]=field(default_factory=list) - install_cn_models: List[str] = field(default_factory=list) - remove_cn_models: List[str] = field(default_factory=list) - install_lora_models: List[str] = field(default_factory=list) - remove_lora_models: List[str] = field(default_factory=list) - install_ti_models: List[str] = field(default_factory=list) - remove_ti_models: List[str] = field(default_factory=list) scan_directory: Path = None autoscan_on_startup: bool=False - import_model_paths: str=None @dataclass class ModelLoadInfo(): @@ -82,18 +87,30 @@ class ModelLoadInfo(): description: str = '' installed: bool = False recommended: bool = False - + default: bool = False + class ModelInstall(object): - def __init__(self,config:InvokeAIAppConfig): + def __init__(self, + config:InvokeAIAppConfig, + prediction_type_helper: Callable[[Path],SchedulerPredictionType]=None, + access_token:str = None): self.config = config self.mgr = ModelManager(config.model_conf_path) self.datasets = OmegaConf.load(Dataset_path) + self.prediction_helper = prediction_type_helper + self.access_token = access_token or HfFolder.get_token() + self.reverse_paths = self._reverse_paths(self.datasets) def all_models(self)->Dict[str,ModelLoadInfo]: ''' - Return dict of model_key=>ModelStatus + Return dict of model_key=>ModelLoadInfo objects. + This method consolidates and simplifies the entries in both + models.yaml and INITIAL_MODELS.yaml so that they can + be treated uniformly. It also sorts the models alphabetically + by their name, to improve the display somewhat. 
''' model_dict = dict() + # first populate with the entries in INITIAL_MODELS.yaml for key, value in self.datasets.items(): name,base,model_type = ModelManager.parse_key(key) @@ -128,102 +145,237 @@ class ModelInstall(object): if model_type==ModelType.Pipeline: models.add(key) return models + + def recommended_models(self)->Set[str]: + starters = self.starter_models() + return set([x for x in starters if self.datasets[x].get('recommended',False)]) + + def default_model(self)->str: + starters = self.starter_models() + defaults = [x for x in starters if self.datasets[x].get('default',False)] + return defaults[0] + + def install(self, selections: InstallSelections): + job = 1 + jobs = len(selections.remove_models) + len(selections.install_models) + if selections.scan_directory: + jobs += 1 - -def default_config_file(): - return config.model_conf_path - -def sd_configs(): - return config.legacy_conf_path - -def initial_models(): - global Datasets - if Datasets: - return Datasets - return (Datasets := OmegaConf.load(Dataset_path)['diffusers']) - -def add_models(model_manager, config_file_path: Path, models: List[tuple[str,str,str]]): - print(f'Installing {models}') - -def del_models(model_manager, config_file_path: Path, models: List[tuple[str,str,str]]): - for base, model_type, name in models: - logger.info(f"Deleting {name}...") - model_manager.del_model(name, base, model_type) - model_manager.commit(config_file_path) - -def install_requested_models( - diffusers: ModelInstallList = None, - controlnet: ModelInstallList = None, - lora: ModelInstallList = None, - ti: ModelInstallList = None, - cn_model_map: Dict[str,str] = None, # temporary - move to model manager - scan_directory: Path = None, - external_models: List[str] = None, - scan_at_startup: bool = False, - precision: str = "float16", - config_file_path: Path = None, - model_config_file_callback: Callable[[Path],Path] = None, -): - """ - Entry point for installing/deleting starter models, or installing external models. 
- """ - access_token = HfFolder.get_token() - config_file_path = config_file_path or default_config_file() - if not config_file_path.exists(): - open(config_file_path, "w") - - # prevent circular import here - from ..model_management import ModelManager - model_manager = ModelManager(OmegaConf.load(config_file_path), precision=precision) - - for x in [controlnet, lora, ti, diffusers]: - if x: - add_models(model_manager, config_file_path, x.install_models) - del_models(model_manager, config_file_path, x.remove_models) + # remove requested models + for key in selections.remove_models: + name,base,mtype = self.mgr.parse_key(key) + logger.info(f'Deleting {mtype} model {name} [{job}/{jobs}]') + self.mgr.del_model(name,base,mtype) + job += 1 - # if diffusers: + # add requested models + for path in selections.install_models: + logger.info(f'Installing {path} [{job}/{jobs}]') + self.heuristic_install(path) + job += 1 - # if diffusers.install_models and len(diffusers.install_models) > 0: - # logger.info("Installing requested models") - # downloaded_paths = download_weight_datasets( - # models=diffusers.install_models, - # access_token=None, - # precision=precision, - # ) - # successful = {x:v for x,v in downloaded_paths.items() if v is not None} - # if len(successful) > 0: - # update_config_file(successful, config_file_path) - # if len(successful) < len(diffusers.install_models): - # unsuccessful = [x for x in downloaded_paths if downloaded_paths[x] is None] - # logger.warning(f"Some of the model downloads were not successful: {unsuccessful}") + # import from the scan directory, if any + if path := selections.scan_directory: + logger.info(f'Scanning and importing models from directory {path} [{job}/{jobs}]') + self.heuristic_install(path) - # due to above, we have to reload the model manager because conf file - # was changed behind its back - model_manager = ModelManager(OmegaConf.load(config_file_path), precision=precision) + self.mgr.commit() - external_models = external_models or list() - if scan_directory: - external_models.append(str(scan_directory)) + if selections.autoscan_on_startup and Path(selections.scan_directory).is_dir(): + update_autoconvert_dir(selections.scan_directory) + else: + update_autoconvert_dir(None) - if len(external_models) > 0: - logger.info("INSTALLING EXTERNAL MODELS") - for path_url_or_repo in external_models: - logger.debug(path_url_or_repo) - try: - model_manager.heuristic_import( - path_url_or_repo, - commit_to_conf=config_file_path, - config_file_callback = model_config_file_callback, + def heuristic_install(self, model_path_id_or_url: Union[str,Path]): + # A little hack to allow nested routines to retrieve info on the requested ID + self.current_id = model_path_id_or_url + + path = Path(model_path_id_or_url) + + # checkpoint file, or similar + if path.is_file(): + self._install_path(path) + return + + # folders style or similar + if path.is_dir() and any([(path/x).exists() for x in ['config.json','model_index.json','learned_embeds.bin']]): + self._install_path(path) + return + + # recursive scan + if path.is_dir(): + for child in path.iterdir(): + self.heuristic_install(child) + return + + # huggingface repo + parts = str(path).split('/') + if len(parts) == 2: + self._install_repo(str(path)) + return + + # a URL + if model_path_id_or_url.startswith(("http:", "https:", "ftp:")): + self._install_url(model_path_id_or_url) + return + + logger.warning(f'{str(model_path_id_or_url)} is not recognized as a local path, repo ID or URL. 
Skipping') + + # install a model from a local path. The optional info parameter is there to prevent + # the model from being probed twice in the event that it has already been probed. + def _install_path(self, path: Path, info: ModelProbeInfo=None): + try: + info = info or ModelProbe().heuristic_probe(path,self.prediction_helper) + if info.model_type == ModelType.Pipeline: + attributes = self._make_attributes(path,info) + self.mgr.add_model(model_name = path.stem if info.format=='checkpoint' else path.name, + base_model = info.base_type, + model_type = info.model_type, + model_attributes = attributes + ) + except Exception as e: + logger.warning(f'{str(e)} Skipping registration.') + + def _install_url(self, url: str): + # copy to a staging area, probe, import and delete + with TemporaryDirectory(dir=self.config.models_path) as staging: + location = download_with_resume(url,Path(staging)) + if not location: + logger.error(f'Unable to download {url}. Skipping.') + info = ModelProbe().heuristic_probe(location) + dest = self.config.models_path / info.base_type.value / info.model_type.value / location.name + models_path = shutil.move(location,dest) + + # staged version will be garbage-collected at this time + self._install_path(Path(models_path), info) + + def _get_model_name(self,path_name: str, location: Path)->str: + ''' + Calculate a name for the model - primitive implementation. + ''' + if key := self.reverse_paths.get(path_name): + (name, base, mtype) = ModelManager.parse_key(key) + return name + else: + return location.stem + + def _install_repo(self, repo_id: str): + hinfo = HfApi().model_info(repo_id) + + # we try to figure out how to download this most economically + # list all the files in the repo + files = [x.rfilename for x in hinfo.siblings] + + with TemporaryDirectory(dir=self.config.models_path) as staging: + staging = Path(staging) + if 'model_index.json' in files: + location = self._download_hf_pipeline(repo_id, staging) # pipeline + + elif 'pytorch_lora_weights.bin' in files: + location = self._download_hf_model(repo_id, ['pytorch_lora_weights.bin'], staging) # LoRA + + elif self.config.precision=='float16' and 'diffusion_pytorch_model.fp16.safetensors' in files: # vae, controlnet or some other standalone + files = ['config.json', 'diffusion_pytorch_model.fp16.safetensors'] + location = self._download_hf_model(repo_id, files, staging) + + elif 'diffusion_pytorch_model.safetensors' in files: + files = ['config.json', 'diffusion_pytorch_model.safetensors'] + location = self._download_hf_model(repo_id, files, staging) + + elif 'learned_embeds.bin' in files: + location = self._download_hf_model(repo_id, ['learned_embeds.bin'], staging) + + info = ModelProbe().heuristic_probe(location, self.prediction_helper) + dest = self.config.models_path / info.base_type.value / info.model_type.value / self._get_model_name(repo_id,location) + if dest.exists(): + shutil.rmtree(dest) + shutil.copytree(location,dest) + self._install_path(dest, info) + + def _make_attributes(self, path: Path, info: ModelProbeInfo)->dict: + + # convoluted way to retrieve the description from datasets + description = f'{info.base_type.value} {info.model_type.value} model' + if key := self.reverse_paths.get(self.current_id): + if key in self.datasets: + description = self.datasets[key]['description'] + + attributes = dict( + path = str(path), + description = str(description), + format = info.format, + ) + if info.model_type == ModelType.Pipeline: + attributes.update( + dict( + variant = info.variant_type, + ) + ) + 
if info.base_type == BaseModelType.StableDiffusion2: + attributes.update( + dict( + prediction_type = info.prediction_type, + upcast_attention = info.prediction_type == SchedulerPredictionType.VPrediction, + ) ) - except KeyboardInterrupt: - sys.exit(-1) - except Exception: - pass + if info.format=="checkpoint": + try: + legacy_conf = LEGACY_CONFIGS[info.base_type][info.variant_type][info.prediction_type] if BaseModelType.StableDiffusion2 \ + else LEGACY_CONFIGS[info.base_type][info.variant_type] + except KeyError: + legacy_conf = 'v1-inference.yaml' # best guess + + attributes.update( + dict( + config = str(self.config.legacy_conf_path / legacy_conf) + ) + ) + return attributes - if scan_at_startup and scan_directory.is_dir(): - update_autoconvert_dir(scan_directory) - else: - update_autoconvert_dir(None) + def _download_hf_pipeline(self, repo_id: str, staging: Path)->Path: + ''' + This retrieves a StableDiffusion model from cache or remote and then + does a save_pretrained() to the indicated staging area. + ''' + _,name = repo_id.split("/") + revisions = ['fp16','main'] if self.config.precision=='float16' else ['main'] + model = None + for revision in revisions: + try: + model = StableDiffusionPipeline.from_pretrained(repo_id,revision=revision,safety_checker=None) + except: # most errors are due to fp16 not being present. Fix this to catch other errors + pass + if model: + break + if not model: + logger.error(f'Diffusers model {repo_id} could not be downloaded. Skipping.') + return None + model.save_pretrained(staging / name, safe_serialization=True) + return staging / name + + def _download_hf_model(self, repo_id: str, files: List[str], staging: Path)->Path: + _,name = repo_id.split("/") + location = staging / name + paths = list() + for filename in files: + p = hf_download_with_resume(repo_id, + model_dir=location, + model_name=filename, + access_token = self.access_token + ) + if p: + paths.append(p) + else: + logger.warning(f'Could not download {filename} from {repo_id}.') + + return location if len(paths)>0 else None + + @classmethod + def _reverse_paths(cls,datasets)->dict: + ''' + Reverse mapping from repo_id/path to destination name. + ''' + return {v.get('path') or v.get('repo_id') : k for k, v in datasets.items()} def update_autoconvert_dir(autodir: Path): ''' @@ -249,89 +401,7 @@ def yes_or_no(prompt: str, default_yes=True): return response[0] in ("y", "Y") # --------------------------------------------- -def recommended_datasets() -> List['str']: - datasets = set() - for ds in initial_models().keys(): - if initial_models()[ds].get("recommended", False): - datasets.add(ds) - return list(datasets) - -# --------------------------------------------- -def default_dataset() -> dict: - datasets = set() - for ds in initial_models().keys(): - if initial_models()[ds].get("default", False): - datasets.add(ds) - return list(datasets) - - -# --------------------------------------------- -def all_datasets() -> dict: - datasets = dict() - for ds in initial_models().keys(): - datasets[ds] = True - return datasets - - -# --------------------------------------------- -# look for legacy model.ckpt in models directory and offer to -# normalize its name -def migrate_models_ckpt(): - model_path = os.path.join(config.root_dir, Model_dir, Weights_dir) - if not os.path.exists(os.path.join(model_path, "model.ckpt")): - return - new_name = initial_models()["stable-diffusion-1.4"]["file"] - logger.warning( - 'The Stable Diffusion v4.1 "model.ckpt" is already installed. 
The name will be changed to {new_name} to avoid confusion.' - ) - logger.warning(f"model.ckpt => {new_name}") - os.replace( - os.path.join(model_path, "model.ckpt"), os.path.join(model_path, new_name) - ) - - -# --------------------------------------------- -def download_weight_datasets( - models: List[str], access_token: str, precision: str = "float32" -): - migrate_models_ckpt() - successful = dict() - for mod in models: - logger.info(f"Downloading {mod}:") - successful[mod] = _download_repo_or_file( - initial_models()[mod], access_token, precision=precision - ) - return successful - - -def _download_repo_or_file( - mconfig: DictConfig, access_token: str, precision: str = "float32" -) -> Path: - path = None - if mconfig["format"] == "ckpt": - path = _download_ckpt_weights(mconfig, access_token) - else: - path = _download_diffusion_weights(mconfig, access_token, precision=precision) - if "vae" in mconfig and "repo_id" in mconfig["vae"]: - _download_diffusion_weights( - mconfig["vae"], access_token, precision=precision - ) - return path - -def _download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path: - repo_id = mconfig["repo_id"] - filename = mconfig["file"] - cache_dir = os.path.join(config.root_dir, Model_dir, Weights_dir) - return hf_download_with_resume( - repo_id=repo_id, - model_dir=cache_dir, - model_name=filename, - access_token=access_token, - ) - - -# --------------------------------------------- -def download_from_hf( +def hf_download_from_pretrained( model_class: object, model_name: str, destination: Path, **kwargs ): logger = InvokeAILogger.getLogger('InvokeAI') @@ -345,35 +415,6 @@ def download_from_hf( model.save_pretrained(destination, safe_serialization=True) return destination -def _download_diffusion_weights( - mconfig: DictConfig, access_token: str, precision: str = "float32" -): - repo_id = mconfig["repo_id"] - model_class = ( - StableDiffusionGeneratorPipeline - if mconfig.get("format", None) == "diffusers" - else AutoencoderKL - ) - extra_arg_list = [{"revision": "fp16"}, {}] if precision == "float16" else [{}] - path = None - for extra_args in extra_arg_list: - try: - path = download_from_hf( - model_class, - repo_id, - safety_checker=None, - **extra_args, - ) - except OSError as e: - if 'Revision Not Found' in str(e): - pass - else: - logger.error(str(e)) - if path: - break - return path - - # --------------------------------------------- def hf_download_with_resume( repo_id: str, @@ -432,128 +473,3 @@ def hf_download_with_resume( return model_dest -# --------------------------------------------- -def update_config_file(successfully_downloaded: dict, config_file: Path): - config_file = ( - Path(config_file) if config_file is not None else default_config_file() - ) - - # In some cases (incomplete setup, etc), the default configs directory might be missing. - # Create it if it doesn't exist. - # this check is ignored if opt.config_file is specified - user is assumed to know what they - # are doing if they are passing a custom config file from elsewhere. - if config_file is default_config_file() and not config_file.parent.exists(): - configs_src = Dataset_path.parent - configs_dest = default_config_file().parent - shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True) - - yaml = new_config_file_contents(successfully_downloaded, config_file) - - try: - backup = None - if os.path.exists(config_file): - logger.warning( - f"{config_file.name} exists. 
Renaming to {config_file.stem}.yaml.orig" - ) - backup = config_file.with_suffix(".yaml.orig") - ## Ugh. Windows is unable to overwrite an existing backup file, raises a WinError 183 - if sys.platform == "win32" and backup.is_file(): - backup.unlink() - config_file.rename(backup) - - with TemporaryFile() as tmp: - tmp.write(Config_preamble.encode()) - tmp.write(yaml.encode()) - - with open(str(config_file.expanduser().resolve()), "wb") as new_config: - tmp.seek(0) - new_config.write(tmp.read()) - - except Exception as e: - logger.error(f"Error creating config file {config_file}: {str(e)}") - if backup is not None: - logger.info("restoring previous config file") - ## workaround, for WinError 183, see above - if sys.platform == "win32" and config_file.is_file(): - config_file.unlink() - backup.rename(config_file) - return - - logger.info(f"Successfully created new configuration file {config_file}") - - -# --------------------------------------------- -def new_config_file_contents( - successfully_downloaded: dict, - config_file: Path, -) -> str: - if config_file.exists(): - conf = OmegaConf.load(str(config_file.expanduser().resolve())) - else: - conf = OmegaConf.create() - - default_selected = None - for model in successfully_downloaded: - # a bit hacky - what we are doing here is seeing whether a checkpoint - # version of the model was previously defined, and whether the current - # model is a diffusers (indicated with a path) - if conf.get(model) and Path(successfully_downloaded[model]).is_dir(): - delete_weights(model, conf[model]) - - stanza = {} - mod = initial_models()[model] - stanza["description"] = mod["description"] - stanza["repo_id"] = mod["repo_id"] - stanza["format"] = mod["format"] - # diffusers don't need width and height (probably .ckpt doesn't either) - # so we no longer require these in INITIAL_MODELS.yaml - if "width" in mod: - stanza["width"] = mod["width"] - if "height" in mod: - stanza["height"] = mod["height"] - if "file" in mod: - stanza["weights"] = os.path.relpath( - successfully_downloaded[model], start=config.root_dir - ) - stanza["config"] = os.path.normpath( - os.path.join(sd_configs(), mod["config"]) - ) - if "vae" in mod: - if "file" in mod["vae"]: - stanza["vae"] = os.path.normpath( - os.path.join(Model_dir, Weights_dir, mod["vae"]["file"]) - ) - else: - stanza["vae"] = mod["vae"] - if mod.get("default", False): - stanza["default"] = True - default_selected = True - - conf[model] = stanza - - # if no default model was chosen, then we select the first - # one in the list - if not default_selected: - conf[list(successfully_downloaded.keys())[0]]["default"] = True - - return OmegaConf.to_yaml(conf) - - -# --------------------------------------------- -def delete_weights(model_name: str, conf_stanza: dict): - if not (weights := conf_stanza.get("weights")): - return - if re.match("/VAE/", conf_stanza.get("config")): - return - - logger.warning( - f"\nThe checkpoint version of {model_name} is superseded by the diffusers version. Deleting the original file {weights}?" 
- ) - - weights = Path(weights) - if not weights.is_absolute(): - weights = config.root_dir / weights - try: - weights.unlink() - except OSError as e: - logger.error(str(e)) diff --git a/invokeai/backend/model_management/__init__.py b/invokeai/backend/model_management/__init__.py index aea7b417a1..fb3b20a20a 100644 --- a/invokeai/backend/model_management/__init__.py +++ b/invokeai/backend/model_management/__init__.py @@ -4,3 +4,4 @@ Initialization file for invokeai.backend.model_management from .model_manager import ModelManager, ModelInfo from .model_cache import ModelCache from .models import BaseModelType, ModelType, SubModelType, ModelVariantType + diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 79c6573f4f..7a7a765fd3 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -682,7 +682,7 @@ class ModelManager(object): for model_key, model_config in list(self.models.items()): model_name, base_model, model_type = self.parse_key(model_key) - model_path = str(self.globals.root / model_config.path) + model_path = str(self.globals.root_path / model_config.path) if not os.path.exists(model_path): model_class = MODEL_CLASSES[base_model][model_type] if model_class.save_to_config: @@ -703,13 +703,14 @@ class ModelManager(object): for entry_name in os.listdir(models_dir): model_path = os.path.join(models_dir, entry_name) if model_path not in loaded_files: # TODO: check - model_name = Path(model_path).stem + model_path = Path(model_path) + model_name = model_path.name if model_path.is_dir else model_path.stem model_key = self.create_key(model_name, base_model, model_type) if model_key in self.models: raise Exception(f"Model with key {model_key} added twice") - model_config: ModelConfigBase = model_class.probe_config(model_path) + model_config: ModelConfigBase = model_class.probe_config(str(model_path)) self.models[model_key] = model_config new_models_found = True diff --git a/invokeai/backend/model_management/model_probe.py b/invokeai/backend/model_management/model_probe.py index 54fac5cde1..59e0c8e970 100644 --- a/invokeai/backend/model_management/model_probe.py +++ b/invokeai/backend/model_management/model_probe.py @@ -15,13 +15,13 @@ import invokeai.backend.util.logging as logger from .models import BaseModelType, ModelType, ModelVariantType, SchedulerPredictionType, SilenceWarnings @dataclass -class ModelVariantInfo(object): +class ModelProbeInfo(object): model_type: ModelType base_type: BaseModelType variant_type: ModelVariantType prediction_type: SchedulerPredictionType upcast_attention: bool - format: Literal['folder','checkpoint'] + format: Literal['diffusers','checkpoint'] image_size: int class ProbeBase(object): @@ -31,7 +31,7 @@ class ProbeBase(object): class ModelProbe(object): PROBES = { - 'folder': { }, + 'diffusers': { }, 'checkpoint': { }, } @@ -43,7 +43,7 @@ class ModelProbe(object): @classmethod def register_probe(cls, - format: Literal['folder','file'], + format: Literal['diffusers','checkpoint'], model_type: ModelType, probe_class: ProbeBase): cls.PROBES[format][model_type] = probe_class @@ -51,8 +51,8 @@ class ModelProbe(object): @classmethod def heuristic_probe(cls, model: Union[Dict, ModelMixin, Path], - prediction_type_helper: Callable[[Path],BaseModelType]=None, - )->ModelVariantInfo: + prediction_type_helper: Callable[[Path],SchedulerPredictionType]=None, + )->ModelProbeInfo: if isinstance(model,Path): return 
cls.probe(model_path=model,prediction_type_helper=prediction_type_helper) elif isinstance(model,(dict,ModelMixin,ConfigMixin)): @@ -64,7 +64,7 @@ class ModelProbe(object): def probe(cls, model_path: Path, model: Union[Dict, ModelMixin] = None, - prediction_type_helper: Callable[[Path],BaseModelType] = None)->ModelVariantInfo: + prediction_type_helper: Callable[[Path],SchedulerPredictionType] = None)->ModelProbeInfo: ''' Probe the model at model_path and return sufficient information about it to place it somewhere in the models directory hierarchy. If the model is @@ -74,14 +74,14 @@ class ModelProbe(object): between V2-Base and V2-768 SD models. ''' if model_path: - format = 'folder' if model_path.is_dir() else 'checkpoint' + format = 'diffusers' if model_path.is_dir() else 'checkpoint' else: - format = 'folder' if isinstance(model,(ConfigMixin,ModelMixin)) else 'checkpoint' + format = 'diffusers' if isinstance(model,(ConfigMixin,ModelMixin)) else 'checkpoint' model_info = None try: model_type = cls.get_model_type_from_folder(model_path, model) \ - if format == 'folder' \ + if format == 'diffusers' \ else cls.get_model_type_from_checkpoint(model_path, model) probe_class = cls.PROBES[format].get(model_type) if not probe_class: @@ -90,7 +90,7 @@ class ModelProbe(object): base_type = probe.get_base_type() variant_type = probe.get_variant_type() prediction_type = probe.get_scheduler_prediction_type() - model_info = ModelVariantInfo( + model_info = ModelProbeInfo( model_type = model_type, base_type = base_type, variant_type = variant_type, @@ -196,7 +196,7 @@ class CheckpointProbeBase(ProbeBase): def __init__(self, checkpoint_path: Path, checkpoint: dict, - helper: Callable[[Path],BaseModelType] = None + helper: Callable[[Path],SchedulerPredictionType] = None )->BaseModelType: self.checkpoint = checkpoint or ModelProbe._scan_and_load_checkpoint(checkpoint_path) self.checkpoint_path = checkpoint_path @@ -405,11 +405,11 @@ class LoRAFolderProbe(FolderProbeBase): pass ############## register probe classes ###### -ModelProbe.register_probe('folder', ModelType.Pipeline, PipelineFolderProbe) -ModelProbe.register_probe('folder', ModelType.Vae, VaeFolderProbe) -ModelProbe.register_probe('folder', ModelType.Lora, LoRAFolderProbe) -ModelProbe.register_probe('folder', ModelType.TextualInversion, TextualInversionFolderProbe) -ModelProbe.register_probe('folder', ModelType.ControlNet, ControlNetFolderProbe) +ModelProbe.register_probe('diffusers', ModelType.Pipeline, PipelineFolderProbe) +ModelProbe.register_probe('diffusers', ModelType.Vae, VaeFolderProbe) +ModelProbe.register_probe('diffusers', ModelType.Lora, LoRAFolderProbe) +ModelProbe.register_probe('diffusers', ModelType.TextualInversion, TextualInversionFolderProbe) +ModelProbe.register_probe('diffusers', ModelType.ControlNet, ControlNetFolderProbe) ModelProbe.register_probe('checkpoint', ModelType.Pipeline, PipelineCheckpointProbe) ModelProbe.register_probe('checkpoint', ModelType.Vae, VaeCheckpointProbe) ModelProbe.register_probe('checkpoint', ModelType.Lora, LoRACheckpointProbe) diff --git a/invokeai/backend/model_management/models/base.py b/invokeai/backend/model_management/models/base.py index 3bf0045918..f18099b4e7 100644 --- a/invokeai/backend/model_management/models/base.py +++ b/invokeai/backend/model_management/models/base.py @@ -154,7 +154,6 @@ class ModelBase(metaclass=ABCMeta): def create_config(cls, **kwargs) -> ModelConfigBase: if "format" not in kwargs: raise Exception("Field 'format' not found in model config") - configs = 
cls._get_configs() return configs[kwargs["format"]](**kwargs) diff --git a/invokeai/configs/INITIAL_MODELS.yaml b/invokeai/configs/INITIAL_MODELS.yaml index ccb7ca09aa..cb16f3ed4b 100644 --- a/invokeai/configs/INITIAL_MODELS.yaml +++ b/invokeai/configs/INITIAL_MODELS.yaml @@ -3,6 +3,7 @@ sd-1/pipeline/stable-diffusion-v1-5: description: Stable Diffusion version 1.5 diffusers model (4.27 GB) repo_id: runwayml/stable-diffusion-v1-5 recommended: True + default: True sd-1/pipeline/stable-diffusion-inpainting: description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB) repo_id: runwayml/stable-diffusion-inpainting @@ -27,7 +28,7 @@ sd-1/pipeline/Dungeons-and-Diffusion: description: Dungeons & Dragons characters (2.13 GB) repo_id: 0xJustin/Dungeons-and-Diffusion recommended: False -sd-1/pipeline/dreamlike-photoreal-2.0: +sd-1/pipeline/dreamlike-photoreal-2: description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB) repo_id: dreamlike-art/dreamlike-photoreal-2.0 recommended: False diff --git a/invokeai/configs/stable-diffusion/v2-inpainting-inference-v.yaml b/invokeai/configs/stable-diffusion/v2-inpainting-inference-v.yaml new file mode 100644 index 0000000000..37cda460aa --- /dev/null +++ b/invokeai/configs/stable-diffusion/v2-inpainting-inference-v.yaml @@ -0,0 +1,159 @@ +model: + base_learning_rate: 5.0e-05 + target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + parameterization: "v" + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false + conditioning_key: hybrid + scale_factor: 0.18215 + monitor: val/loss_simple_ema + finetune_keys: null + use_ema: False + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + use_checkpoint: True + image_size: 32 # unused + in_channels: 9 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_head_channels: 64 # need to fix for flash-attn + use_spatial_transformer: True + use_linear_in_transformer: True + transformer_depth: 1 + context_dim: 1024 + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + #attn_type: "vanilla-xformers" + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [ ] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder + params: + freeze: True + layer: "penultimate" + + +data: + target: ldm.data.laion.WebDataModuleFromConfig + params: + tar_base: null # for concat as in LAION-A + p_unsafe_threshold: 0.1 + filter_word_list: "data/filters.yaml" + max_pwatermark: 0.45 + batch_size: 8 + num_workers: 6 + multinode: True + min_size: 512 + train: + shards: + - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-0/{00000..18699}.tar -" + - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-1/{00000..18699}.tar -" + - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-2/{00000..18699}.tar -" + - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-3/{00000..18699}.tar -" + - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-4/{00000..18699}.tar -" #{00000-94333}.tar" + shuffle: 10000 + image_key: 
jpg + image_transforms: + - target: torchvision.transforms.Resize + params: + size: 512 + interpolation: 3 + - target: torchvision.transforms.RandomCrop + params: + size: 512 + postprocess: + target: ldm.data.laion.AddMask + params: + mode: "512train-large" + p_drop: 0.25 + # NOTE use enough shards to avoid empty validation loops in workers + validation: + shards: + - "pipe:aws s3 cp s3://deep-floyd-s3/datasets/laion_cleaned-part5/{93001..94333}.tar - " + shuffle: 0 + image_key: jpg + image_transforms: + - target: torchvision.transforms.Resize + params: + size: 512 + interpolation: 3 + - target: torchvision.transforms.CenterCrop + params: + size: 512 + postprocess: + target: ldm.data.laion.AddMask + params: + mode: "512train-large" + p_drop: 0.25 + +lightning: + find_unused_parameters: True + modelcheckpoint: + params: + every_n_train_steps: 5000 + + callbacks: + metrics_over_trainsteps_checkpoint: + params: + every_n_train_steps: 10000 + + image_logger: + target: main.ImageLogger + params: + enable_autocast: False + disabled: False + batch_frequency: 1000 + max_images: 4 + increase_log_steps: False + log_first_step: False + log_images_kwargs: + use_ema_scope: False + inpaint: False + plot_progressive_rows: False + plot_diffusion_rows: False + N: 4 + unconditional_guidance_scale: 5.0 + unconditional_guidance_label: [""] + ddim_steps: 50 # todo check these out for depth2img, + ddim_eta: 0.0 # todo check these out for depth2img, + + trainer: + benchmark: True + val_check_interval: 5000000 + num_sanity_val_steps: 0 + accumulate_grad_batches: 1 \ No newline at end of file diff --git a/invokeai/configs/stable-diffusion/v2-inpainting-inference.yaml b/invokeai/configs/stable-diffusion/v2-inpainting-inference.yaml new file mode 100644 index 0000000000..5aaf13162d --- /dev/null +++ b/invokeai/configs/stable-diffusion/v2-inpainting-inference.yaml @@ -0,0 +1,158 @@ +model: + base_learning_rate: 5.0e-05 + target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false + conditioning_key: hybrid + scale_factor: 0.18215 + monitor: val/loss_simple_ema + finetune_keys: null + use_ema: False + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + use_checkpoint: True + image_size: 32 # unused + in_channels: 9 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_head_channels: 64 # need to fix for flash-attn + use_spatial_transformer: True + use_linear_in_transformer: True + transformer_depth: 1 + context_dim: 1024 + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + #attn_type: "vanilla-xformers" + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [ ] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder + params: + freeze: True + layer: "penultimate" + + +data: + target: ldm.data.laion.WebDataModuleFromConfig + params: + tar_base: null # for concat as in LAION-A + p_unsafe_threshold: 0.1 + filter_word_list: "data/filters.yaml" + max_pwatermark: 0.45 + batch_size: 8 + num_workers: 6 + 
multinode: True + min_size: 512 + train: + shards: + - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-0/{00000..18699}.tar -" + - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-1/{00000..18699}.tar -" + - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-2/{00000..18699}.tar -" + - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-3/{00000..18699}.tar -" + - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-4/{00000..18699}.tar -" #{00000-94333}.tar" + shuffle: 10000 + image_key: jpg + image_transforms: + - target: torchvision.transforms.Resize + params: + size: 512 + interpolation: 3 + - target: torchvision.transforms.RandomCrop + params: + size: 512 + postprocess: + target: ldm.data.laion.AddMask + params: + mode: "512train-large" + p_drop: 0.25 + # NOTE use enough shards to avoid empty validation loops in workers + validation: + shards: + - "pipe:aws s3 cp s3://deep-floyd-s3/datasets/laion_cleaned-part5/{93001..94333}.tar - " + shuffle: 0 + image_key: jpg + image_transforms: + - target: torchvision.transforms.Resize + params: + size: 512 + interpolation: 3 + - target: torchvision.transforms.CenterCrop + params: + size: 512 + postprocess: + target: ldm.data.laion.AddMask + params: + mode: "512train-large" + p_drop: 0.25 + +lightning: + find_unused_parameters: True + modelcheckpoint: + params: + every_n_train_steps: 5000 + + callbacks: + metrics_over_trainsteps_checkpoint: + params: + every_n_train_steps: 10000 + + image_logger: + target: main.ImageLogger + params: + enable_autocast: False + disabled: False + batch_frequency: 1000 + max_images: 4 + increase_log_steps: False + log_first_step: False + log_images_kwargs: + use_ema_scope: False + inpaint: False + plot_progressive_rows: False + plot_diffusion_rows: False + N: 4 + unconditional_guidance_scale: 5.0 + unconditional_guidance_label: [""] + ddim_steps: 50 # todo check these out for depth2img, + ddim_eta: 0.0 # todo check these out for depth2img, + + trainer: + benchmark: True + val_check_interval: 5000000 + num_sanity_val_steps: 0 + accumulate_grad_batches: 1 \ No newline at end of file diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index 1753364f64..80ddebca84 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -11,7 +11,6 @@ The work is actually done in backend code in model_install_backend.py. import argparse import curses -import os import sys import textwrap import traceback @@ -20,27 +19,21 @@ from multiprocessing import Process from multiprocessing.connection import Connection, Pipe from pathlib import Path from shutil import get_terminal_size -from typing import List import logging import npyscreen import torch from npyscreen import widget -from omegaconf import OmegaConf from invokeai.backend.util.logging import InvokeAILogger from invokeai.backend.install.model_install_backend import ( - Dataset_path, # most of these should go!! 
- default_config_file, - default_dataset, - install_requested_models, - recommended_datasets, ModelInstallList, - UserSelections, - ModelInstall + InstallSelections, + ModelInstall, + SchedulerPredictionType, ) -from invokeai.backend.model_management import ModelManager, BaseModelType, ModelType +from invokeai.backend.model_management import ModelManager, ModelType from invokeai.backend.util import choose_precision, choose_torch_device from invokeai.frontend.install.widgets import ( CenteredTitleText, @@ -133,7 +126,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): bottom_of_table = self.nextrely self.nextrely = top_of_table - self.pipeline_models = self.add_model_widgets( + self.pipeline_models = self.add_pipeline_widgets( model_type=ModelType.Pipeline, window_width=window_width, exclude = self.starter_models @@ -210,11 +203,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): starters = self.starter_models starter_model_labels = self.model_labels - recommended_models = set([ - x - for x in starters - if models[x].recommended - ]) self.installed_models = sorted( [x for x in starters if models[x].installed] ) @@ -312,16 +300,18 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): return widgets ### Tab for arbitrary diffusers widgets ### - def add_diffusers_widgets(self, - model_type: ModelType=ModelType.Pipeline, - window_width: int=120, - )->dict[str,npyscreen.widget]: + def add_pipeline_widgets(self, + model_type: ModelType=ModelType.Pipeline, + window_width: int=120, + **kwargs, + )->dict[str,npyscreen.widget]: '''Similar to add_model_widgets() but adds some additional widgets at the bottom to support the autoload directory''' widgets = self.add_model_widgets( model_type = model_type, window_width = window_width, install_prompt=f"Additional {model_type.value.title()} models already installed.", + **kwargs, ) label = "Directory to scan for models to automatically import ( autocompletes):" @@ -428,7 +418,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): target = process_and_execute, kwargs=dict( opt = app.program_opts, - selections = app.user_selections, + selections = app.install_selections, conn_out = child_conn, ) ) @@ -436,8 +426,8 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): child_conn.close() self.subprocess_connection = parent_conn self.subprocess = p - app.user_selections = UserSelections() - # process_and_execute(app.opt, app.user_selections) + app.install_selections = InstallSelections() + # process_and_execute(app.opt, app.install_selections) def on_back(self): self.parentApp.switchFormPrevious() @@ -453,7 +443,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): self.parentApp.setNextForm(None) self.parentApp.user_cancelled = False self.editing = False - + ########## This routine monitors the child process that is performing model installation and removal ##### def while_waiting(self): '''Called during idle periods. 
Main task is to update the Log Messages box with messages @@ -532,73 +522,24 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): .autoscan_on_startup: True if invokeai should scan and import at startup time .import_model_paths: list of URLs, repo_ids and file paths to import """ - # we're using a global here rather than storing the result in the parentapp - # due to some bug in npyscreen that is causing attributes to be lost - selections = self.parentApp.user_selections + selections = self.parentApp.install_selections + all_models = self.all_models - # Starter models to install/remove - # TO DO - turn these into a dict so we don't have to hard-code the attributes - print(f'installed={[x for x in self.all_models if self.all_models[x].installed]}',file=f) - for section in [self.starter_pipelines, self.pipeline_models, - self.controlnet_models, self.lora_models, self.ti_models]: + # Defined models (in INITIAL_CONFIG.yaml or models.yaml) to add/remove + ui_sections = [self.starter_pipelines, self.pipeline_models, + self.controlnet_models, self.lora_models, self.ti_models] + for section in ui_sections: selected = set([section['models'][x] for x in section['models_selected'].value]) models_to_install = [x for x in selected if not self.all_models[x].installed] models_to_remove = [x for x in section['models'] if x not in selected and self.all_models[x].installed] - - # "More" models - selections.import_model_paths = self.pipeline_models['download_ids'].value.split() - if diffusers_selected := self.pipeline_models.get('models_selected'): - selections.remove_models.extend([x - for x in diffusers_selected.values - if self.installed_pipeline_models[x] - and diffusers_selected.values.index(x) not in diffusers_selected.value - ] - ) - - # TODO: REFACTOR THIS REPETITIVE CODE - if cn_models_selected := self.controlnet_models.get('models_selected'): - selections.install_cn_models = [cn_models_selected.values[x] - for x in cn_models_selected.value - if not self.installed_cn_models[cn_models_selected.values[x]] - ] - selections.remove_cn_models = [x - for x in cn_models_selected.values - if self.installed_cn_models[x] - and cn_models_selected.values.index(x) not in cn_models_selected.value - ] - if (additional_cns := self.controlnet_models['download_ids'].value.split()): - valid_cns = [x for x in additional_cns if '/' in x] - selections.install_cn_models.extend(valid_cns) + selections.remove_models.extend(models_to_remove) + selections.install_models.extend(all_models[x].path or all_models[x].repo_id \ + for x in models_to_install if all_models[x].path or all_models[x].repo_id) - # same thing, for LoRAs - if loras_selected := self.lora_models.get('models_selected'): - selections.install_lora_models = [loras_selected.values[x] - for x in loras_selected.value - if not self.installed_lora_models[loras_selected.values[x]] - ] - selections.remove_lora_models = [x - for x in loras_selected.values - if self.installed_lora_models[x] - and loras_selected.values.index(x) not in loras_selected.value - ] - if (additional_loras := self.lora_models['download_ids'].value.split()): - selections.install_lora_models.extend(additional_loras) - - # same thing, for TIs - # TODO: refactor - if tis_selected := self.ti_models.get('models_selected'): - selections.install_ti_models = [tis_selected.values[x] - for x in tis_selected.value - if not self.installed_ti_models[tis_selected.values[x]] - ] - selections.remove_ti_models = [x - for x in tis_selected.values - if self.installed_ti_models[x] - and 
tis_selected.values.index(x) not in tis_selected.value - ] - - if (additional_tis := self.ti_models['download_ids'].value.split()): - selections.install_ti_models.extend(additional_tis) + # models located in the 'download_ids" section + for section in ui_sections: + if downloads := section.get('download_ids'): + selections.install_models.extend(downloads.value.split()) # load directory and whether to scan on startup selections.scan_directory = self.pipeline_models['autoload_directory'].value @@ -609,7 +550,7 @@ class AddModelApplication(npyscreen.NPSAppManaged): super().__init__() self.program_opts = opt self.user_cancelled = False - self.user_selections = UserSelections() + self.install_selections = InstallSelections() def onStart(self): npyscreen.setTheme(npyscreen.Themes.DefaultTheme) @@ -628,21 +569,17 @@ class StderrToMessage(): pass # -------------------------------------------------------- -def ask_user_for_config_file(model_path: Path, - tui_conn: Connection=None - )->Path: +def ask_user_for_prediction_type(model_path: Path, + tui_conn: Connection=None + )->Path: if tui_conn: logger.debug('Waiting for user response...') - return _ask_user_for_cf_tui(model_path, tui_conn) + return _ask_user_for_pt_tui(model_path, tui_conn) else: - return _ask_user_for_cf_cmdline(model_path) + return _ask_user_for_pt_cmdline(model_path) -def _ask_user_for_cf_cmdline(model_path): - choices = [ - config.legacy_conf_path / x - for x in ['v2-inference.yaml','v2-inference-v.yaml'] - ] - choices.extend([None]) +def _ask_user_for_pt_cmdline(model_path): + choices = [SchedulerPredictionType.Epsilon, SchedulerPredictionType.VPrediction, None] print( f""" Please select the type of the V2 checkpoint named {model_path.name}: @@ -664,7 +601,7 @@ Please select the type of the V2 checkpoint named {model_path.name}: return return choice -def _ask_user_for_cf_tui(model_path: Path, tui_conn: Connection)->Path: +def _ask_user_for_pt_tui(model_path: Path, tui_conn: Connection)->Path: try: tui_conn.send_bytes(f'*need v2 config for:{model_path}'.encode('utf-8')) # note that we don't do any status checking here @@ -672,20 +609,20 @@ def _ask_user_for_cf_tui(model_path: Path, tui_conn: Connection)->Path: if response is None: return None elif response == 'epsilon': - return config.legacy_conf_path / 'v2-inference.yaml' + return SchedulerPredictionType.epsilon elif response == 'v': - return config.legacy_conf_path / 'v2-inference-v.yaml' + return SchedulerPredictionType.VPrediction elif response == 'abort': logger.info('Conversion aborted') return None else: - return Path(response) + return response except: return None # -------------------------------------------------------- def process_and_execute(opt: Namespace, - selections: UserSelections, + selections: InstallSelections, conn_out: Connection=None, ): # set up so that stderr is sent to conn_out @@ -696,34 +633,14 @@ def process_and_execute(opt: Namespace, logger = InvokeAILogger.getLogger() logger.handlers.clear() logger.addHandler(logging.StreamHandler(translator)) - - models_to_install = selections.install_models - models_to_remove = selections.remove_models - directory_to_scan = selections.scan_directory - scan_at_startup = selections.autoscan_on_startup - potential_models_to_install = selections.import_model_paths - name_map = selections.model_name_map - install_requested_models( - diffusers = ModelInstallList(models_to_install, [name_map[ModelType.Pipeline][x] for x in models_to_remove]), - controlnet = ModelInstallList(selections.install_cn_models, 
[name_map[ModelType.ControlNet][x] for x in selections.remove_cn_models]), - lora = ModelInstallList(selections.install_lora_models, [name_map[ModelType.Lora][x] for x in selections.remove_lora_models]), - ti = ModelInstallList(selections.install_ti_models, [name_map[ModelType.TextualInversion][x] for x in selections.remove_ti_models]), - scan_directory=Path(directory_to_scan) if directory_to_scan else None, - external_models=potential_models_to_install, - scan_at_startup=scan_at_startup, - precision="float32" - if opt.full_precision - else choose_precision(torch.device(choose_torch_device())), - config_file_path=Path(opt.config_file) if opt.config_file else config.model_conf_path, - model_config_file_callback = lambda x: ask_user_for_config_file(x,conn_out) - ) + installer = ModelInstall(config, prediction_type_helper=lambda x: ask_user_for_prediction_type(x,conn_out)) + installer.install(selections) if conn_out: conn_out.send_bytes('*done*'.encode('utf-8')) conn_out.close() - def do_listings(opt)->bool: """List installed models of various sorts, and return True if any were requested.""" @@ -754,38 +671,34 @@ def select_and_download_models(opt: Namespace): if opt.full_precision else choose_precision(torch.device(choose_torch_device())) ) - - if do_listings(opt): - pass - # this processes command line additions/removals - elif opt.diffusers or opt.controlnets or opt.textual_inversions or opt.loras: - action = 'remove_models' if opt.delete else 'install_models' - diffusers_args = {'diffusers':ModelInstallList(remove_models=opt.diffusers or [])} \ - if opt.delete \ - else {'external_models':opt.diffusers or []} - install_requested_models( - **diffusers_args, - controlnet=ModelInstallList(**{action:opt.controlnets or []}), - ti=ModelInstallList(**{action:opt.textual_inversions or []}), - lora=ModelInstallList(**{action:opt.loras or []}), - precision=precision, - model_config_file_callback=lambda x: ask_user_for_config_file(x), + config.precision = precision + helper = lambda x: ask_user_for_prediction_type(x) + # if do_listings(opt): + # pass + + installer = ModelInstall(config, prediction_type_helper=helper) + if opt.add or opt.delete: + selections = InstallSelections( + install_models = opt.add or [], + remove_models = opt.delete or [] ) + installer.install(selections) elif opt.default_only: - install_requested_models( - diffusers=ModelInstallList(install_models=default_dataset()), - precision=precision, + selections = InstallSelections( + install_models = installer.default_model() ) + installer.install(selections) elif opt.yes_to_all: - install_requested_models( - diffusers=ModelInstallList(install_models=recommended_datasets()), - precision=precision, + selections = InstallSelections( + install_models = installer.recommended_models() ) + installer.install(selections) # this is where the TUI is called else: # needed because the torch library is loaded, even though we don't use it - torch.multiprocessing.set_start_method("spawn") + # currently commented out because it has started generating errors (?) 
+ # torch.multiprocessing.set_start_method("spawn") # the third argument is needed in the Windows 11 environment in # order to launch and resize a console window running this program @@ -801,35 +714,20 @@ def select_and_download_models(opt: Namespace): installApp.main_form.subprocess.terminate() installApp.main_form.subprocess = None raise e - process_and_execute(opt, installApp.user_selections) + process_and_execute(opt, installApp.install_selections) # ------------------------------------- def main(): parser = argparse.ArgumentParser(description="InvokeAI model downloader") parser.add_argument( - "--diffusers", + "--add", nargs="*", - help="List of URLs or repo_ids of diffusers to install/delete", - ) - parser.add_argument( - "--loras", - nargs="*", - help="List of URLs or repo_ids of LoRA/LyCORIS models to install/delete", - ) - parser.add_argument( - "--controlnets", - nargs="*", - help="List of URLs or repo_ids of controlnet models to install/delete", - ) - parser.add_argument( - "--textual-inversions", - nargs="*", - help="List of URLs or repo_ids of textual inversion embeddings to install/delete", + help="List of URLs, local paths or repo_ids of models to install", ) parser.add_argument( "--delete", - action="store_true", - help="Delete models listed on command line rather than installing them", + nargs="*", + help="List of names of models to idelete", ) parser.add_argument( "--full-precision", @@ -849,7 +747,7 @@ def main(): parser.add_argument( "--default_only", action="store_true", - help="only install the default model", + help="Only install the default model", ) parser.add_argument( "--list-models", diff --git a/invokeai/frontend/install/widgets.py b/invokeai/frontend/install/widgets.py index 14167d4ee0..5ef7f6924e 100644 --- a/invokeai/frontend/install/widgets.py +++ b/invokeai/frontend/install/widgets.py @@ -17,8 +17,8 @@ from shutil import get_terminal_size from curses import BUTTON2_CLICKED,BUTTON3_CLICKED # minimum size for UIs -MIN_COLS = 120 -MIN_LINES = 50 +MIN_COLS = 180 +MIN_LINES = 55 # ------------------------------------- def set_terminal_size(columns: int, lines: int, launch_command: str=None): @@ -384,7 +384,6 @@ def select_stable_diffusion_config_file( "An SD v2.x base model (512 pixels; no 'parameterization:' line in its yaml file)", "An SD v2.x v-predictive model (768 pixels; 'parameterization: \"v\"' line in its yaml file)", "Skip installation for now and come back later", - "Enter config file path manually", ] F = ConfirmCancelPopup( @@ -406,35 +405,17 @@ def select_stable_diffusion_config_file( mlw.values = message choice = F.add( - SingleSelectWithChanged, + npyscreen.SelectOne, values = options, value = [0], max_height = len(options)+1, scroll_exit=True, ) - file = F.add( - FileBox, - name='Path to config file', - max_height=3, - hidden=True, - must_exist=True, - scroll_exit=True - ) - - def toggle_visible(value): - value = value[0] - if value==3: - file.hidden=False - else: - file.hidden=True - F.display() - - choice.on_changed = toggle_visible F.editw = 1 F.edit() if not F.value: return None - assert choice.value[0] in range(0,4),'invalid choice' - choices = ['epsilon','v','abort',file.value] + assert choice.value[0] in range(0,3),'invalid choice' + choices = ['epsilon','v','abort'] return choices[choice.value[0]] diff --git a/scripts/migrate_models_to_3.0.py b/scripts/migrate_models_to_3.0.py index 2d498df237..23db6d63da 100644 --- a/scripts/migrate_models_to_3.0.py +++ b/scripts/migrate_models_to_3.0.py @@ -26,7 +26,7 @@ from transformers import ( 
import invokeai.backend.util.logging as logger from invokeai.backend.model_management import ModelManager from invokeai.backend.model_management.model_probe import ( - ModelProbe, ModelType, BaseModelType, SchedulerPredictionType, ModelVariantInfo + ModelProbe, ModelType, BaseModelType, SchedulerPredictionType, ModelProbeInfo ) warnings.filterwarnings("ignore") @@ -171,13 +171,13 @@ def migrate_tuning_models(dest: Path): logger.info(f'Scanning {subdir}') migrate_models(src, dest) -def write_yaml(model_name: str, path:Path, info:ModelVariantInfo, dest_yaml: io.TextIOBase): +def write_yaml(model_name: str, path:Path, info:ModelProbeInfo, dest_yaml: io.TextIOBase): name = unique_name(model_name, info) stanza = { f'{info.base_type.value}/{info.model_type.value}/{name}': { 'name': model_name, 'path': str(path), - 'description': f'diffusers model {model_name}', + 'description': f'A {info.base_type.value} {info.model_type.value} model', 'format': 'diffusers', 'image_size': info.image_size, 'base': info.base_type.value, @@ -266,7 +266,7 @@ def migrate_checkpoints(dest_dir: Path, dest_yaml: io.TextIOBase): { 'name': model_name, 'path': str(weights), - 'description': f'checkpoint model {model_name}', + 'description': f'{info.base_type.value}-based checkpoint', 'format': 'checkpoint', 'image_size': info.image_size, 'base': info.base_type.value, From 15f8132e17ffbdba24ca7a42cdd9e1bbd6b737e7 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 16 Jun 2023 22:57:53 -0400 Subject: [PATCH 03/47] add direct-call script for model installer --- scripts/invokeai-model-install.py | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 scripts/invokeai-model-install.py diff --git a/scripts/invokeai-model-install.py b/scripts/invokeai-model-install.py new file mode 100644 index 0000000000..97bb499812 --- /dev/null +++ b/scripts/invokeai-model-install.py @@ -0,0 +1,3 @@ +from invokeai.frontend.install.model_install import main +main() + From ddb3f4b02b4101c4882c5e2cd46ee53db49fbc99 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 17 Jun 2023 19:26:35 -0400 Subject: [PATCH 04/47] make configure script work properly on empty rootdir --- .../backend/install/invokeai_configure.py | 8 ++++---- .../backend/install/model_install_backend.py | 2 ++ invokeai/frontend/install/model_install.py | 19 +++++++++++-------- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py index 582b24cbfa..f2487efcfb 100755 --- a/invokeai/backend/install/invokeai_configure.py +++ b/invokeai/backend/install/invokeai_configure.py @@ -671,7 +671,8 @@ def initialize_rootdir(root: Path, yes_to_all: bool = False): } ) ) - + with open(root / 'invokeai.yaml','w') as f: + f.write('#empty invokeai.yaml initialization file') # ------------------------------------- def run_console_ui( @@ -827,8 +828,6 @@ def main(): errors = set() try: - models_to_download = default_user_selections(opt) - # We check for to see if the runtime directory is correctly initialized. old_init_file = config.root_path / 'invokeai.init' new_init_file = config.root_path / 'invokeai.yaml' @@ -841,6 +840,7 @@ def main(): if not config.model_conf_path.exists(): initialize_rootdir(config.root_path, opt.yes_to_all) + models_to_download = default_user_selections(opt) if opt.yes_to_all: write_default_options(opt, new_init_file) init_options = Namespace( @@ -855,7 +855,7 @@ def main(): '\n** CANCELLED AT USER\'S REQUEST. 
USE THE "invoke.sh" LAUNCHER TO RUN LATER **\n' ) sys.exit(0) - + if opt.skip_support_models: logger.info("SKIPPING SUPPORT MODEL DOWNLOADS PER USER REQUEST") else: diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index 54e5cdc1d8..ced5e99cdc 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -95,6 +95,8 @@ class ModelInstall(object): prediction_type_helper: Callable[[Path],SchedulerPredictionType]=None, access_token:str = None): self.config = config + with open('log.txt','w') as file: + print(config.model_conf_path,file=file) self.mgr = ModelManager(config.model_conf_path) self.datasets = OmegaConf.load(Dataset_path) self.prediction_helper = prediction_type_helper diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index 80ddebca84..a99251e78c 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -173,13 +173,14 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): rely=-3, when_pressed_function=self.on_back, ) - self.ok_button = self.add_widget_intelligent( - npyscreen.ButtonPress, - name=done_label, - relx=(window_width - len(done_label)) // 2, - rely=-3, - when_pressed_function=self.on_execute - ) + else: + self.ok_button = self.add_widget_intelligent( + npyscreen.ButtonPress, + name=done_label, + relx=(window_width - len(done_label)) // 2, + rely=-3, + when_pressed_function=self.on_execute + ) label = "APPLY CHANGES & EXIT" self.done = self.add_widget_intelligent( @@ -529,6 +530,8 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): ui_sections = [self.starter_pipelines, self.pipeline_models, self.controlnet_models, self.lora_models, self.ti_models] for section in ui_sections: + if not 'models_selected' in section: + continue selected = set([section['models'][x] for x in section['models_selected'].value]) models_to_install = [x for x in selected if not self.all_models[x].installed] models_to_remove = [x for x in section['models'] if x not in selected and self.all_models[x].installed] @@ -540,7 +543,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): for section in ui_sections: if downloads := section.get('download_ids'): selections.install_models.extend(downloads.value.split()) - + # load directory and whether to scan on startup selections.scan_directory = self.pipeline_models['autoload_directory'].value selections.autoscan_on_startup = self.pipeline_models['autoscan_on_startup'].value From 294b1e83e681b0325814494b0ff1b00d6e5b6ea6 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Tue, 20 Jun 2023 09:42:10 -0400 Subject: [PATCH 05/47] test and fix edge cases --- .../backend/install/invokeai_configure.py | 115 +++++++++--------- .../backend/install/model_install_backend.py | 8 +- invokeai/frontend/install/model_install.py | 6 +- invokeai/frontend/install/widgets.py | 2 +- 4 files changed, 70 insertions(+), 61 deletions(-) diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py index f2487efcfb..dc0e18d6fb 100755 --- a/invokeai/backend/install/invokeai_configure.py +++ b/invokeai/backend/install/invokeai_configure.py @@ -442,46 +442,46 @@ to allow InvokeAI to download restricted styles & subjects from the "Concept Lib begin_entry_at=4, scroll_exit=True, ) - self.nextrely += 1 - self.add_widget_intelligent( - npyscreen.FixedText, - value="Directories containing textual 
inversion, controlnet and LoRA models ( autocompletes, ctrl-N advances):", - editable=False, - color="CONTROL", - ) - self.embedding_dir = self.add_widget_intelligent( - npyscreen.TitleFilename, - name=" Textual Inversion Embeddings:", - value=str(default_embedding_dir()), - select_dir=True, - must_exist=False, - use_two_lines=False, - labelColor="GOOD", - begin_entry_at=32, - scroll_exit=True, - ) - self.lora_dir = self.add_widget_intelligent( - npyscreen.TitleFilename, - name=" LoRA and LyCORIS:", - value=str(default_lora_dir()), - select_dir=True, - must_exist=False, - use_two_lines=False, - labelColor="GOOD", - begin_entry_at=32, - scroll_exit=True, - ) - self.controlnet_dir = self.add_widget_intelligent( - npyscreen.TitleFilename, - name=" ControlNets:", - value=str(default_controlnet_dir()), - select_dir=True, - must_exist=False, - use_two_lines=False, - labelColor="GOOD", - begin_entry_at=32, - scroll_exit=True, - ) + # self.nextrely += 1 + # self.add_widget_intelligent( + # npyscreen.FixedText, + # value="Directories containing textual inversion, controlnet and LoRA models ( autocompletes, ctrl-N advances):", + # editable=False, + # color="CONTROL", + # ) + # self.embedding_dir = self.add_widget_intelligent( + # npyscreen.TitleFilename, + # name=" Textual Inversion Embeddings:", + # value=str(default_embedding_dir()), + # select_dir=True, + # must_exist=False, + # use_two_lines=False, + # labelColor="GOOD", + # begin_entry_at=32, + # scroll_exit=True, + # ) + # self.lora_dir = self.add_widget_intelligent( + # npyscreen.TitleFilename, + # name=" LoRA and LyCORIS:", + # value=str(default_lora_dir()), + # select_dir=True, + # must_exist=False, + # use_two_lines=False, + # labelColor="GOOD", + # begin_entry_at=32, + # scroll_exit=True, + # ) + # self.controlnet_dir = self.add_widget_intelligent( + # npyscreen.TitleFilename, + # name=" ControlNets:", + # value=str(default_controlnet_dir()), + # select_dir=True, + # must_exist=False, + # use_two_lines=False, + # labelColor="GOOD", + # begin_entry_at=32, + # scroll_exit=True, + # ) self.nextrely += 1 self.add_widget_intelligent( npyscreen.TitleFixedText, @@ -546,10 +546,10 @@ https://huggingface.co/spaces/CompVis/stable-diffusion-license bad_fields.append( f"The output directory does not seem to be valid. Please check that {str(Path(opt.outdir).parent)} is an existing directory." ) - if not Path(opt.embedding_dir).parent.exists(): - bad_fields.append( - f"The embedding directory does not seem to be valid. Please check that {str(Path(opt.embedding_dir).parent)} is an existing directory." - ) + # if not Path(opt.embedding_dir).parent.exists(): + # bad_fields.append( + # f"The embedding directory does not seem to be valid. Please check that {str(Path(opt.embedding_dir).parent)} is an existing directory." 
+ # ) if len(bad_fields) > 0: message = "The following problems were detected and must be corrected:\n" for problem in bad_fields: @@ -569,9 +569,9 @@ https://huggingface.co/spaces/CompVis/stable-diffusion-license "max_loaded_models", "xformers_enabled", "always_use_cpu", - "embedding_dir", - "lora_dir", - "controlnet_dir", +# "embedding_dir", +# "lora_dir", +# "controlnet_dir", ]: setattr(new_opts, attr, getattr(self, attr).value) @@ -591,6 +591,7 @@ class EditOptApplication(npyscreen.NPSAppManaged): self.program_opts = program_opts self.invokeai_opts = invokeai_opts self.user_cancelled = False + self.autoload_pending = True self.install_selections = default_user_selections(program_opts) def onStart(self): @@ -719,17 +720,17 @@ def write_opts(opts: Namespace, init_file: Path): def default_output_dir() -> Path: return config.root_path / "outputs" -# ------------------------------------- -def default_embedding_dir() -> Path: - return config.root_path / "embeddings" +# # ------------------------------------- +# def default_embedding_dir() -> Path: +# return config.root_path / "embeddings" -# ------------------------------------- -def default_lora_dir() -> Path: - return config.root_path / "loras" +# # ------------------------------------- +# def default_lora_dir() -> Path: +# return config.root_path / "loras" -# ------------------------------------- -def default_controlnet_dir() -> Path: - return config.root_path / "controlnets" +# # ------------------------------------- +# def default_controlnet_dir() -> Path: +# return config.root_path / "controlnets" # ------------------------------------- def write_default_options(program_opts: Namespace, initfile: Path): @@ -755,7 +756,7 @@ def migrate_init_file(legacy_format:Path): new.nsfw_checker = old.safety_checker new.xformers_enabled = old.xformers new.conf_path = old.conf - new.embedding_dir = old.embedding_path +# new.embedding_dir = old.embedding_path invokeai_yaml = legacy_format.parent / 'invokeai.yaml' with open(invokeai_yaml,"w", encoding="utf-8") as outfile: diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index ced5e99cdc..f8bae1455b 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -227,10 +227,14 @@ class ModelInstall(object): # the model from being probed twice in the event that it has already been probed. 
def _install_path(self, path: Path, info: ModelProbeInfo=None): try: + logger.info(f'Probing {path}') info = info or ModelProbe().heuristic_probe(path,self.prediction_helper) if info.model_type == ModelType.Pipeline: + model_name = path.stem if info.format=='checkpoint' else path.name + if self.mgr.model_exists(model_name, info.base_type, info.model_type): + raise Exception(f'A model named "{model_name}" is already installed.') attributes = self._make_attributes(path,info) - self.mgr.add_model(model_name = path.stem if info.format=='checkpoint' else path.name, + self.mgr.add_model(model_name = model_name, base_model = info.base_type, model_type = info.model_type, model_attributes = attributes @@ -322,7 +326,7 @@ class ModelInstall(object): ) if info.format=="checkpoint": try: - legacy_conf = LEGACY_CONFIGS[info.base_type][info.variant_type][info.prediction_type] if BaseModelType.StableDiffusion2 \ + legacy_conf = LEGACY_CONFIGS[info.base_type][info.variant_type][info.prediction_type] if info.base_type == BaseModelType.StableDiffusion2 \ else LEGACY_CONFIGS[info.base_type][info.variant_type] except KeyError: legacy_conf = 'v1-inference.yaml' # best guess diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index a99251e78c..7d77d4e0cf 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -131,6 +131,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): window_width=window_width, exclude = self.starter_models ) + self.pipeline_models['autoload_pending'] = True bottom_of_table = max(bottom_of_table,self.nextrely) self.nextrely = top_of_table @@ -545,7 +546,9 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): selections.install_models.extend(downloads.value.split()) # load directory and whether to scan on startup - selections.scan_directory = self.pipeline_models['autoload_directory'].value + if self.parentApp.autoload_pending: + selections.scan_directory = self.pipeline_models['autoload_directory'].value + self.parentApp.autoload_pending = False selections.autoscan_on_startup = self.pipeline_models['autoscan_on_startup'].value class AddModelApplication(npyscreen.NPSAppManaged): @@ -553,6 +556,7 @@ class AddModelApplication(npyscreen.NPSAppManaged): super().__init__() self.program_opts = opt self.user_cancelled = False + self.autoload_pending = True self.install_selections = InstallSelections() def onStart(self): diff --git a/invokeai/frontend/install/widgets.py b/invokeai/frontend/install/widgets.py index 5ef7f6924e..bace5839c1 100644 --- a/invokeai/frontend/install/widgets.py +++ b/invokeai/frontend/install/widgets.py @@ -378,7 +378,7 @@ def select_stable_diffusion_config_file( wrap:bool =True, model_name:str='Unknown', ): - message = "Please select the correct base model for the V2 checkpoint named {model_name}. Press to skip installation." + message = f"Please select the correct base model for the V2 checkpoint named '{model_name}'. Press to skip installation." 
title = "CONFIG FILE SELECTION" options=[ "An SD v2.x base model (512 pixels; no 'parameterization:' line in its yaml file)", From ac6403f8779711bb39eaea04f286bad478cfb04a Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Tue, 20 Jun 2023 11:08:27 -0400 Subject: [PATCH 06/47] address some of ebr issues --- .../backend/install/model_install_backend.py | 41 +++--- .../backend/model_management/model_install.py | 118 ------------------ .../backend/model_management/model_manager.py | 2 +- .../backend/model_management/model_probe.py | 12 +- invokeai/configs/INITIAL_MODELS.yaml | 4 +- 5 files changed, 36 insertions(+), 141 deletions(-) delete mode 100644 invokeai/backend/model_management/model_install.py diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index f8bae1455b..82b5da6ebb 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -95,8 +95,6 @@ class ModelInstall(object): prediction_type_helper: Callable[[Path],SchedulerPredictionType]=None, access_token:str = None): self.config = config - with open('log.txt','w') as file: - print(config.model_conf_path,file=file) self.mgr = ModelManager(config.model_conf_path) self.datasets = OmegaConf.load(Dataset_path) self.prediction_helper = prediction_type_helper @@ -271,27 +269,36 @@ class ModelInstall(object): # we try to figure out how to download this most economically # list all the files in the repo files = [x.rfilename for x in hinfo.siblings] + location = None with TemporaryDirectory(dir=self.config.models_path) as staging: staging = Path(staging) if 'model_index.json' in files: location = self._download_hf_pipeline(repo_id, staging) # pipeline - - elif 'pytorch_lora_weights.bin' in files: - location = self._download_hf_model(repo_id, ['pytorch_lora_weights.bin'], staging) # LoRA - - elif self.config.precision=='float16' and 'diffusion_pytorch_model.fp16.safetensors' in files: # vae, controlnet or some other standalone - files = ['config.json', 'diffusion_pytorch_model.fp16.safetensors'] - location = self._download_hf_model(repo_id, files, staging) - - elif 'diffusion_pytorch_model.safetensors' in files: - files = ['config.json', 'diffusion_pytorch_model.safetensors'] - location = self._download_hf_model(repo_id, files, staging) - - elif 'learned_embeds.bin' in files: - location = self._download_hf_model(repo_id, ['learned_embeds.bin'], staging) - + else: + for suffix in ['safetensors','bin']: + if f'pytorch_lora_weights.{suffix}' in files: + location = self._download_hf_model(repo_id, ['pytorch_lora_weights.bin'], staging) # LoRA + break + elif self.config.precision=='float16' and f'diffusion_pytorch_model.fp16.{suffix}' in files: # vae, controlnet or some other standalone + files = ['config.json', f'diffusion_pytorch_model.fp16.{suffix}'] + location = self._download_hf_model(repo_id, files, staging) + break + elif f'diffusion_pytorch_model.{suffix}' in files: + files = ['config.json', f'diffusion_pytorch_model.{suffix}'] + location = self._download_hf_model(repo_id, files, staging) + break + elif f'learned_embeds.{suffix}' in files: + location = self._download_hf_model(repo_id, [f'learned_embeds.suffix'], staging) + break + if not location: + logger.warning(f'Could not determine type of repo {repo_id}. Skipping install.') + return + info = ModelProbe().heuristic_probe(location, self.prediction_helper) + if not info: + logger.warning(f'Could not probe {location}. 
Skipping install.') + return dest = self.config.models_path / info.base_type.value / info.model_type.value / self._get_model_name(repo_id,location) if dest.exists(): shutil.rmtree(dest) diff --git a/invokeai/backend/model_management/model_install.py b/invokeai/backend/model_management/model_install.py deleted file mode 100644 index 64c52185f3..0000000000 --- a/invokeai/backend/model_management/model_install.py +++ /dev/null @@ -1,118 +0,0 @@ -""" -Routines for downloading and installing models. -""" -import json -import safetensors -import safetensors.torch -import shutil -import tempfile -import torch -import traceback -from dataclasses import dataclass -from diffusers import ModelMixin -from enum import Enum -from typing import Callable -from pathlib import Path - -import invokeai.backend.util.logging as logger -from invokeai.app.services.config import InvokeAIAppConfig -from . import ModelManager -from .models import BaseModelType, ModelType, VariantType -from .model_probe import ModelProbe, ModelVariantInfo -from .model_cache import SilenceWarnings - -class ModelInstall(object): - ''' - This class is able to download and install several different kinds of - InvokeAI models. The helper function, if provided, is called on to distinguish - between v2-base and v2-768 stable diffusion pipelines. This usually involves - asking the user to select the proper type, as there is no way of distinguishing - the two type of v2 file programmatically (as far as I know). - ''' - def __init__(self, - config: InvokeAIAppConfig, - model_base_helper: Callable[[Path],BaseModelType]=None, - clobber:bool = False - ): - ''' - :param config: InvokeAI configuration object - :param model_base_helper: A function call that accepts the Path to a checkpoint model and returns a ModelType enum - :param clobber: If true, models with colliding names will be overwritten - ''' - self.config = config - self.clogger = clobber - self.helper = model_base_helper - self.prober = ModelProbe() - - def install_checkpoint_file(self, checkpoint: Path)->dict: - ''' - Install the checkpoint file at path and return a - configuration entry that can be added to `models.yaml`. - Model checkpoints and VAEs will be converted into - diffusers before installation. Note that the model manager - does not hold entries for anything but diffusers pipelines, - and the configuration file stanzas returned from such models - can be safely ignored. 
- ''' - model_info = self.prober.probe(checkpoint, self.helper) - if not model_info: - raise ValueError(f"Unable to determine type of checkpoint file {checkpoint}") - - key = ModelManager.create_key( - model_name = checkpoint.stem, - base_model = model_info.base_type, - model_type = model_info.model_type, - ) - destination_path = self._dest_path(model_info) / checkpoint - destination_path.parent.mkdir(parents=True, exist_ok=True) - self._check_for_collision(destination_path) - stanza = { - key: dict( - name = checkpoint.stem, - description = f'{model_info.model_type} model {checkpoint.stem}', - base = model_info.base_model.value, - type = model_info.model_type.value, - variant = model_info.variant_type.value, - path = str(destination_path), - ) - } - - # non-pipeline; no conversion needed, just copy into right place - if model_info.model_type != ModelType.Pipeline: - shutil.copyfile(checkpoint, destination_path) - stanza[key].update({'format': 'checkpoint'}) - - # pipeline - conversion needed here - else: - destination_path = self._dest_path(model_info) / checkpoint.stem - config_file = self._pipeline_type_to_config_file(model_info.model_type) - - from .convert_ckpt_to_diffusers import convert_ckpt_to_diffusers - with SilenceWarnings: - convert_ckpt_to_diffusers( - checkpoint, - destination_path, - extract_ema=True, - original_config_file=config_file, - scan_needed=False, - ) - stanza[key].update({'format': 'folder', - 'path': destination_path, # no suffix on this - }) - - return stanza - - - def _check_for_collision(self, path: Path): - if not path.exists(): - return - if self.clobber: - shutil.rmtree(path) - else: - raise ValueError(f"Destination {path} already exists. Won't overwrite unless clobber=True.") - - def _staging_directory(self)->tempfile.TemporaryDirectory: - return tempfile.TemporaryDirectory(dir=self.config.root_path) - - - diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index bb46c80b77..e56ec2a0d2 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -703,7 +703,7 @@ class ModelManager(object): model_path = os.path.join(models_dir, entry_name) if model_path not in loaded_files: # TODO: check model_path = Path(model_path) - model_name = model_path.name if model_path.is_dir else model_path.stem + model_name = model_path.name if model_path.is_dir() else model_path.stem model_key = self.create_key(model_name, base_model, model_type) if model_key in self.models: diff --git a/invokeai/backend/model_management/model_probe.py b/invokeai/backend/model_management/model_probe.py index 59e0c8e970..ec3146a00e 100644 --- a/invokeai/backend/model_management/model_probe.py +++ b/invokeai/backend/model_management/model_probe.py @@ -401,8 +401,16 @@ class ControlNetFolderProbe(FolderProbeBase): else BaseModelType.StableDiffusion2 class LoRAFolderProbe(FolderProbeBase): - # I've never seen one of these in the wild, so this is a noop - pass + def get_base_type(self)->BaseModelType: + model_file = None + for suffix in ['safetensors','bin']: + base_file = self.folder_path / f'pytorch_lora_weights.{suffix}' + if base_file.exists(): + model_file = base_file + break + if not model_file: + raise Exception('Unknown LoRA format encountered') + return LoRACheckpointProbe(model_file,None).get_base_type() ############## register probe classes ###### ModelProbe.register_probe('diffusers', ModelType.Pipeline, PipelineFolderProbe) diff --git 
a/invokeai/configs/INITIAL_MODELS.yaml b/invokeai/configs/INITIAL_MODELS.yaml index cb16f3ed4b..e2288c43fd 100644 --- a/invokeai/configs/INITIAL_MODELS.yaml +++ b/invokeai/configs/INITIAL_MODELS.yaml @@ -87,7 +87,5 @@ sd-1/embedding/ahx-beta-453407d: repo_id: sd-concepts-library/ahx-beta-453407d sd-1/lora/LowRA: path: https://civitai.com/api/download/models/63006 -sd-1/lora/Ink Scenery: +sd-1/lora/Ink scenery: path: https://civitai.com/api/download/models/83390 -sd-1/lora/sd-model-finetuned-lora-t4: - repo_id: sayakpaul/sd-model-finetuned-lora-t4 From 2fc19d9afae805bcc1604581cd2ac4e6cb8289f4 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Tue, 20 Jun 2023 11:45:37 -0400 Subject: [PATCH 07/47] suppress description in "other models" tab for space reasons --- invokeai/backend/install/model_install_backend.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index 82b5da6ebb..a26ec0ef72 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -132,7 +132,7 @@ class ModelInstall(object): name = name, base_type = base, model_type = model_type, - description = value.get('description'), +# description = value.get('description'), path = value.get('path'), installed = True, ) From b727442f844931bed57f9dd55c3e26221b594ab0 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Wed, 21 Jun 2023 09:32:58 -0400 Subject: [PATCH 08/47] better window size behavior under alacritty & terminator --- invokeai/frontend/install/model_install.py | 2 +- invokeai/frontend/install/widgets.py | 16 ++++++++++++++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index 7d77d4e0cf..206cdcacdb 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -161,7 +161,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): BufferBox, name='Log Messages', editable=False, - max_height = 16, + max_height = 10, ) self.nextrely += 1 diff --git a/invokeai/frontend/install/widgets.py b/invokeai/frontend/install/widgets.py index bace5839c1..6b56fa875c 100644 --- a/invokeai/frontend/install/widgets.py +++ b/invokeai/frontend/install/widgets.py @@ -17,8 +17,8 @@ from shutil import get_terminal_size from curses import BUTTON2_CLICKED,BUTTON3_CLICKED # minimum size for UIs -MIN_COLS = 180 -MIN_LINES = 55 +MIN_COLS = 130 +MIN_LINES = 40 # ------------------------------------- def set_terminal_size(columns: int, lines: int, launch_command: str=None): @@ -61,6 +61,12 @@ def _set_terminal_size_unix(width: int, height: int): import fcntl import termios + # These terminals accept the size command and report that the + # size changed, but they lie!!! + for bad_terminal in ['TERMINATOR_UUID', 'ALACRITTY_WINDOW_ID']: + if os.environ.get(bad_terminal): + return + winsize = struct.pack("HHHH", height, width, 0, 0) fcntl.ioctl(sys.stdout.fileno(), termios.TIOCSWINSZ, winsize) sys.stdout.write("\x1b[8;{height};{width}t".format(height=height, width=width)) @@ -75,6 +81,12 @@ def set_min_terminal_size(min_cols: int, min_lines: int, launch_command: str=Non lines = max(term_lines, min_lines) set_terminal_size(cols, lines, launch_command) + # did it work? + term_cols, term_lines = get_terminal_size() + if term_cols < cols or term_lines < lines: + print(f'This window is too small for optimal display. 
For best results, please enlarge it.') + input('After resizing, press any key to continue...') + class IntSlider(npyscreen.Slider): def translate_value(self): stri = "%2d / %2d" % (self.value, self.out_of) From 1c31efa57c1839b352e02d4d09eacb06086362c2 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Wed, 21 Jun 2023 09:37:24 -0400 Subject: [PATCH 09/47] punctuation fix in user message --- invokeai/frontend/install/widgets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/frontend/install/widgets.py b/invokeai/frontend/install/widgets.py index 6b56fa875c..757dd527a6 100644 --- a/invokeai/frontend/install/widgets.py +++ b/invokeai/frontend/install/widgets.py @@ -84,7 +84,7 @@ def set_min_terminal_size(min_cols: int, min_lines: int, launch_command: str=Non # did it work? term_cols, term_lines = get_terminal_size() if term_cols < cols or term_lines < lines: - print(f'This window is too small for optimal display. For best results, please enlarge it.') + print(f'This window is too small for optimal display. For best results please enlarge it.') input('After resizing, press any key to continue...') class IntSlider(npyscreen.Slider): From 33b04f63864ceab35f024b8f94de5db5a2ca87f9 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 22 Jun 2023 15:47:12 -0400 Subject: [PATCH 10/47] migration script working well --- invokeai/backend/install/migrate_to_3.py | 474 +++++++++++++++++++++++ pyproject.toml | 1 + scripts/invokeai-migrate3 | 4 + scripts/migrate_models_to_3.0.py | 322 --------------- 4 files changed, 479 insertions(+), 322 deletions(-) create mode 100644 invokeai/backend/install/migrate_to_3.py create mode 100644 scripts/invokeai-migrate3 delete mode 100644 scripts/migrate_models_to_3.0.py diff --git a/invokeai/backend/install/migrate_to_3.py b/invokeai/backend/install/migrate_to_3.py new file mode 100644 index 0000000000..d7c09c4756 --- /dev/null +++ b/invokeai/backend/install/migrate_to_3.py @@ -0,0 +1,474 @@ +''' +Migrate the models directory and models.yaml file from an existing +InvokeAI 2.3 installation to 3.0.0. 
+''' + +import io +import os +import argparse +import shutil +import yaml + +import transformers +import diffusers +import warnings + +from dataclasses import dataclass +from pathlib import Path +from omegaconf import OmegaConf +from diffusers import StableDiffusionPipeline, AutoencoderKL +from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker +from transformers import ( + CLIPTextModel, + CLIPTokenizer, + AutoFeatureExtractor, + BertTokenizerFast, +) + +import invokeai.backend.util.logging as logger +from invokeai.backend.model_management import ModelManager +from invokeai.backend.model_management.model_probe import ( + ModelProbe, ModelType, BaseModelType, SchedulerPredictionType, ModelProbeInfo + ) + +warnings.filterwarnings("ignore") +transformers.logging.set_verbosity_error() +diffusers.logging.set_verbosity_error() + +# holder for paths that we will migrate +@dataclass +class ModelPaths: + models: Path + embeddings: Path + loras: Path + controlnets: Path + +class MigrateTo3(object): + def __init__(self, + root_directory: Path, + dest_models: Path, + yaml_file: io.TextIOBase, + src_paths: ModelPaths, + ): + self.root_directory = root_directory + self.dest_models = dest_models + self.dest_yaml = yaml_file + self.model_names = set() + self.src_paths = src_paths + + self._initialize_yaml() + + def _initialize_yaml(self): + self.dest_yaml.write( + yaml.dump( + { + '__metadata__': + { + 'version':'3.0.0'} + } + ) + ) + + def unique_name(self,name,info)->str: + ''' + Create a unique name for a model for use within models.yaml. + ''' + done = False + key = ModelManager.create_key(name,info.base_type,info.model_type) + unique_name = key + counter = 1 + while not done: + if unique_name in self.model_names: + unique_name = f'{key}-{counter:0>2d}' + counter += 1 + else: + done = True + self.model_names.add(unique_name) + name,_,_ = ModelManager.parse_key(unique_name) + return name + + def create_directory_structure(self): + ''' + Create the basic directory structure for the models folder. + ''' + for model_base in [BaseModelType.StableDiffusion1,BaseModelType.StableDiffusion2]: + for model_type in [ModelType.Pipeline, ModelType.Vae, ModelType.Lora, + ModelType.ControlNet,ModelType.TextualInversion]: + path = self.dest_models / model_base.value / model_type.value + path.mkdir(parents=True, exist_ok=True) + path = self.dest_models / 'core' + path.mkdir(parents=True, exist_ok=True) + + @staticmethod + def copy_file(src:Path,dest:Path): + ''' + copy a single file with logging + ''' + logger.info(f'Copying {str(src)} to {str(dest)}') + try: + shutil.copy(src, dest) + except Exception as e: + logger.error(f'COPY FAILED: {str(e)}') + + @staticmethod + def copy_dir(src:Path,dest:Path): + ''' + Recursively copy a directory with logging + ''' + logger.info(f'Copying {str(src)} to {str(dest)}') + try: + shutil.copytree(src, dest) + except Exception as e: + logger.error(f'COPY FAILED: {str(e)}') + + def migrate_models(self, src_dir: Path): + ''' + Recursively walk through src directory, probe anything + that looks like a model, and copy the model into the + appropriate location within the destination models directory. 
+ ''' + dest_dir = self.dest_models + for root, dirs, files in os.walk(src_dir): + for f in files: + # hack - don't copy raw learned_embeds.bin, let them + # be copied as part of a tree copy operation + if f == 'learned_embeds.bin': + continue + try: + model = Path(root,f) + info = ModelProbe().heuristic_probe(model) + if not info: + continue + dest = Path(dest_dir, info.base_type.value, info.model_type.value, f) + self.copy_file(model, dest) + except KeyboardInterrupt: + raise + except Exception as e: + logger.error(str(e)) + for d in dirs: + try: + model = Path(root,d) + info = ModelProbe().heuristic_probe(model) + if not info: + continue + dest = Path(dest_dir, info.base_type.value, info.model_type.value, model.name) + self.copy_dir(model, dest) + except KeyboardInterrupt: + raise + except Exception as e: + logger.error(str(e)) + + # TO DO: Rewrite this to support alternate locations for esrgan and gfpgan in init file + def migrate_support_models(self): + ''' + Copy the clipseg, upscaler, and restoration models to their new + locations. + ''' + dest_directory = self.dest_models + if (self.root_directory / 'models/clipseg').exists(): + self.copy_dir(self.root_directory / 'models/clipseg', dest_directory / 'core/misc/clipseg') + if (self.root_directory / 'models/realesrgan').exists(): + self.copy_dir(self.root_directory / 'models/realesrgan', dest_directory / 'core/upscaling/realesrgan') + for d in ['codeformer','gfpgan']: + path = self.root_directory / 'models' / d + if path.exists(): + self.copy_dir(path,dest_directory / f'core/face_restoration/{d}') + + def migrate_tuning_models(self): + ''' + Migrate the embeddings, loras and controlnets directories to their new homes. + ''' + for src in [self.src_paths.embeddings, self.src_paths.loras, self.src_paths.controlnets]: + if not src: + continue + if src.is_dir(): + logger.info(f'Scanning {src}') + self.migrate_models(src) + else: + logger.info(f'{src} directory not found; skipping') + continue + + def migrate_conversion_models(self): + ''' + Migrate all the models that are needed by the ckpt_to_diffusers conversion + script. 
+ ''' + + dest_directory = self.dest_models + kwargs = dict( + cache_dir = self.root_directory / 'models/hub', + #local_files_only = True + ) + try: + logger.info('Migrating core tokenizers and text encoders') + target_dir = dest_directory / 'core' / 'convert' + + # bert + bert = BertTokenizerFast.from_pretrained("bert-base-uncased", **kwargs) + bert.save_pretrained(target_dir / 'bert-base-uncased', safe_serialization=True) + + # sd-1 + repo_id = 'openai/clip-vit-large-patch14' + pipeline = CLIPTokenizer.from_pretrained(repo_id, **kwargs) + pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14' / 'tokenizer', safe_serialization=True) + + pipeline = CLIPTextModel.from_pretrained(repo_id, **kwargs) + pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14' / 'text_encoder', safe_serialization=True) + + # sd-2 + repo_id = "stabilityai/stable-diffusion-2" + pipeline = CLIPTokenizer.from_pretrained(repo_id, subfolder="tokenizer", **kwargs) + pipeline.save_pretrained(target_dir / 'stable-diffusion-2-clip' / 'tokenizer', safe_serialization=True) + + pipeline = CLIPTextModel.from_pretrained(repo_id, subfolder="text_encoder", **kwargs) + pipeline.save_pretrained(target_dir / 'stable-diffusion-2-clip' / 'text_encoder', safe_serialization=True) + + # VAE + logger.info('Migrating stable diffusion VAE') + vae = AutoencoderKL.from_pretrained('stabilityai/sd-vae-ft-mse', **kwargs) + vae.save_pretrained(target_dir / 'sd-vae-ft-mse', safe_serialization=True) + + # safety checking + logger.info('Migrating safety checker') + repo_id = "CompVis/stable-diffusion-safety-checker" + pipeline = AutoFeatureExtractor.from_pretrained(repo_id,**kwargs) + pipeline.save_pretrained(target_dir / 'stable-diffusion-safety-checker', safe_serialization=True) + + pipeline = StableDiffusionSafetyChecker.from_pretrained(repo_id,**kwargs) + pipeline.save_pretrained(target_dir / 'stable-diffusion-safety-checker', safe_serialization=True) + except KeyboardInterrupt: + raise + except Exception as e: + logger.error(str(e)) + + def write_yaml(self, model_name: str, path:Path, info:ModelProbeInfo, **kwargs): + ''' + Write a stanza for a moved model into the new models.yaml file. 
+ ''' + name = self.unique_name(model_name, info) + stanza = { + f'{info.base_type.value}/{info.model_type.value}/{name}': { + 'name': model_name, + 'path': str(path), + 'description': f'A {info.base_type.value} {info.model_type.value} model', + 'format': info.format, + 'image_size': info.image_size, + 'base': info.base_type.value, + 'variant': info.variant_type.value, + 'prediction_type': info.prediction_type.value, + 'upcast_attention': info.prediction_type == SchedulerPredictionType.VPrediction, + **kwargs, + } + } + self.dest_yaml.write(yaml.dump(stanza)) + self.dest_yaml.flush() + + def migrate_repo_id(self, repo_id: str, model_name :str=None): + ''' + Migrate a locally-cached diffusers pipeline identified with a repo_id + ''' + dest_dir = self.dest_models + + cache = self.root_directory / 'models/hub' + kwargs = dict( + cache_dir = cache, + safety_checker = None, + # local_files_only = True, + ) + + owner,repo_name = repo_id.split('/') + model_name = model_name or repo_name + model = cache / '--'.join(['models',owner,repo_name]) + + if len(list(model.glob('snapshots/**/model_index.json')))==0: + return + revisions = [x.name for x in model.glob('refs/*')] + + # if an fp16 is available we use that + revision = 'fp16' if len(revisions) > 1 and 'fp16' in revisions else revisions[0] + pipeline = StableDiffusionPipeline.from_pretrained( + repo_id, + revision=revision, + **kwargs) + + info = ModelProbe().heuristic_probe(pipeline) + if not info: + return + + dest = Path(dest_dir, info.base_type.value, info.model_type.value, f'{repo_name}') + pipeline.save_pretrained(dest, safe_serialization=True) + rel_path = Path('models',dest.relative_to(dest_dir)) + self.write_yaml(model_name, path=rel_path, info=info) + + def migrate_path(self, location: Path, model_name: str=None, **extra_config): + ''' + Migrate a model referred to using 'weights' or 'path' + ''' + + # handle relative paths + dest_dir = self.dest_models + location = self.src_paths.models / location + + info = ModelProbe().heuristic_probe(location) + if not info: + return + + # uh oh, weights is in the old models directory - move it into the new one + if Path(location).is_relative_to(self.src_paths.models): + dest = Path(dest_dir, info.base_type.value, info.model_type.value, location.name) + self.copy_dir(location,dest) + location = Path('models', info.base_type.value, info.model_type.value, location.name) + model_name = model_name or location.stem + model_name = self.unique_name(model_name, info) + self.write_yaml(model_name, path=location, info=info, **extra_config) + + def migrate_defined_models(self): + ''' + Migrate models defined in models.yaml + ''' + # find any models referred to in old models.yaml + conf = OmegaConf.load(self.root_directory / 'configs/models.yaml') + + for model_name, stanza in conf.items(): + + try: + if repo_id := stanza.get('repo_id'): + logger.info(f'Migrating diffusers model {model_name}') + self.migrate_repo_id(repo_id, model_name) + + elif location := stanza.get('weights'): + logger.info(f'Migrating checkpoint model {model_name}') + self.migrate_path(Path(location), model_name, config=stanza.get('config')) + elif location := stanza.get('path'): + logger.info(f'Migrating diffusers model {model_name}') + self.migrate_path(Path(location), model_name, config=stanza.get('config')) + + except KeyboardInterrupt: + raise + except Exception as e: + logger.error(str(e)) + + def migrate(self): + self.create_directory_structure() + # the configure script is doing this + self.migrate_support_models() + 
self.migrate_conversion_models() + self.migrate_tuning_models() + self.migrate_defined_models() + +def _parse_legacy_initfile(root: Path, initfile: Path)->ModelPaths: + ''' + Returns a ModelPaths object (models, embeddings, loras, controlnets) parsed from a legacy invokeai.init file + ''' + parser = argparse.ArgumentParser(fromfile_prefix_chars='@') + parser.add_argument( + '--embedding_directory', + '--embedding_path', + type=Path, + dest='embedding_path', + default=Path('embeddings'), + ) + parser.add_argument( + '--lora_directory', + dest='lora_path', + type=Path, + default=Path('loras'), + ) + opt,_ = parser.parse_known_args([f'@{str(initfile)}']) + return ModelPaths( + models = root / 'models', + embeddings = root / str(opt.embedding_path).strip('"'), + loras = root / str(opt.lora_path).strip('"'), + controlnets = None + ) + +def _parse_legacy_yamlfile(root: Path, initfile: Path)->ModelPaths: + ''' + Returns a ModelPaths object (models, embeddings, loras, controlnets) parsed from a legacy invokeai.yaml file + ''' + # Don't use the config object because it is unforgiving of version updates + # Just use omegaconf directly + opt = OmegaConf.load(initfile) + paths = opt.InvokeAI.Paths + models = paths.get('models_dir','models') + embeddings = paths.get('embedding_dir','embeddings') + loras = paths.get('lora_dir','loras') + controlnets = paths.get('controlnet_dir','controlnets') + return ModelPaths( + models = root / models, + embeddings = root / embeddings, + loras = root / loras, + controlnets = root / controlnets, + ) + +def get_legacy_embeddings(root: Path) -> ModelPaths: + path = root / 'invokeai.init' + if path.exists(): + return _parse_legacy_initfile(root, path) + path = root / 'invokeai.yaml' + if path.exists(): + return _parse_legacy_yamlfile(root, path) + +def main(): + parser = argparse.ArgumentParser(prog="invokeai-migrate3", + description=""" +This will copy and convert the models directory and the configs/models.yaml from the InvokeAI 2.3 format +'--from-directory' root to the InvokeAI 3.0 '--to-directory' root. These may be abbreviated '--from' and '--to'. + +The old models directory and config file will be renamed 'models.orig' and 'models.yaml.orig' respectively. +It is safe to provide the same directory for both arguments, but it is better to use the invokeai_configure +script, which will perform a full upgrade in place.""" + ) + parser.add_argument('--from-directory', + dest='root_directory', + type=Path, + required=True, + help='Source InvokeAI 2.3 root directory (containing "invokeai.init" or "invokeai.yaml")' + ) + parser.add_argument('--to-directory', + dest='dest_directory', + type=Path, + required=True, + help='Destination InvokeAI 3.0 directory (containing "invokeai.yaml")' + ) +# parser.add_argument('--all-models', +# action="store_true", +# help='Migrate all models found in `models` directory, not just those mentioned in models.yaml', +# ) + args = parser.parse_args() + root_directory = args.root_directory + assert root_directory.is_dir(), f"{root_directory} is not a valid directory" + assert (root_directory / 'models').is_dir(), f"{root_directory} does not contain a 'models' subdirectory" + assert (root_directory / 'invokeai.init').exists() or (root_directory / 'invokeai.yaml').exists(), f"{root_directory} does not contain an InvokeAI init file."
+ + dest_directory = args.dest_directory + assert dest_directory.is_dir(), f"{dest_directory} is not a valid directory" + assert (dest_directory / 'models').is_dir(), f"{dest_directory} does not contain a 'models' subdirectory" + assert (dest_directory / 'invokeai.yaml').exists(), f"{dest_directory} does not contain an InvokeAI init file." + + dest_models = dest_directory / 'models-3.0' + dest_yaml = dest_directory / 'configs/models.yaml-3.0' + + paths = get_legacy_embeddings(root_directory) + + with open(dest_yaml,'w') as yaml_file: + migrator = MigrateTo3(root_directory, + dest_models, + yaml_file, + src_paths = paths, + ) + migrator.migrate() + + (dest_directory / 'models').replace(dest_directory / 'models.orig') + dest_models.replace(dest_directory / 'models') + + (dest_directory /'configs/models.yaml').replace(dest_directory / 'configs/models.yaml.orig') + dest_yaml.replace(dest_directory / 'configs/models.yaml') + print(f"""Migration successful. +Original models directory moved to {dest_directory}/models.orig +Original models.yaml file moved to {dest_directory}/configs/models.yaml.orig +""") + +if __name__ == '__main__': + main() + diff --git a/pyproject.toml b/pyproject.toml index 03396312ac..608033bbaa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -120,6 +120,7 @@ dependencies = [ "invokeai-merge" = "invokeai.frontend.merge:invokeai_merge_diffusers" "invokeai-ti" = "invokeai.frontend.training:invokeai_textual_inversion" "invokeai-model-install" = "invokeai.frontend.install:invokeai_model_install" +"invokeai-migrate3" = "invokeai.backend.install.migrate_to_3:main" "invokeai-update" = "invokeai.frontend.install:invokeai_update" "invokeai-metadata" = "invokeai.frontend.CLI.sd_metadata:print_metadata" "invokeai-node-cli" = "invokeai.app.cli_app:invoke_cli" diff --git a/scripts/invokeai-migrate3 b/scripts/invokeai-migrate3 new file mode 100644 index 0000000000..135020f871 --- /dev/null +++ b/scripts/invokeai-migrate3 @@ -0,0 +1,4 @@ +from invokeai.backend.install.migrate_to_3 import main + +if __name__=='__main__': + main() diff --git a/scripts/migrate_models_to_3.0.py b/scripts/migrate_models_to_3.0.py deleted file mode 100644 index 23db6d63da..0000000000 --- a/scripts/migrate_models_to_3.0.py +++ /dev/null @@ -1,322 +0,0 @@ -''' -Migrate the models directory and models.yaml file from an existing -InvokeAI 2.3 installation to 3.0.0. 
-''' - -import io -import os -import argparse -import shutil -import yaml - -import transformers -import diffusers -import warnings -from pathlib import Path -from omegaconf import OmegaConf -from diffusers import StableDiffusionPipeline, AutoencoderKL -from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker -from transformers import ( - CLIPTextModel, - CLIPTokenizer, - AutoFeatureExtractor, - BertTokenizerFast, -) - -import invokeai.backend.util.logging as logger -from invokeai.backend.model_management import ModelManager -from invokeai.backend.model_management.model_probe import ( - ModelProbe, ModelType, BaseModelType, SchedulerPredictionType, ModelProbeInfo - ) - -warnings.filterwarnings("ignore") -transformers.logging.set_verbosity_error() -diffusers.logging.set_verbosity_error() - -model_names = set() - -def unique_name(name,info)->str: - done = False - key = ModelManager.create_key(name,info.base_type,info.model_type) - unique_name = key - counter = 1 - while not done: - if unique_name in model_names: - unique_name = f'{key}-{counter:0>2d}' - counter += 1 - else: - done = True - model_names.add(unique_name) - name,_,_ = ModelManager.parse_key(unique_name) - return name - -def create_directory_structure(dest: Path): - for model_base in [BaseModelType.StableDiffusion1,BaseModelType.StableDiffusion2]: - for model_type in [ModelType.Pipeline, ModelType.Vae, ModelType.Lora, - ModelType.ControlNet,ModelType.TextualInversion]: - path = dest / model_base.value / model_type.value - path.mkdir(parents=True, exist_ok=True) - path = dest / 'core' - path.mkdir(parents=True, exist_ok=True) - -def copy_file(src:Path,dest:Path): - logger.info(f'Copying {str(src)} to {str(dest)}') - try: - shutil.copy(src, dest) - except Exception as e: - logger.error(f'COPY FAILED: {str(e)}') - -def copy_dir(src:Path,dest:Path): - logger.info(f'Copying {str(src)} to {str(dest)}') - try: - shutil.copytree(src, dest) - except Exception as e: - logger.error(f'COPY FAILED: {str(e)}') - -def migrate_models(src_dir: Path, dest_dir: Path): - for root, dirs, files in os.walk(src_dir): - for f in files: - # hack - don't copy raw learned_embeds.bin, let them - # be copied as part of a tree copy operation - if f == 'learned_embeds.bin': - continue - try: - model = Path(root,f) - info = ModelProbe().heuristic_probe(model) - if not info: - continue - dest = Path(dest_dir, info.base_type.value, info.model_type.value, f) - copy_file(model, dest) - except KeyboardInterrupt: - raise - except Exception as e: - logger.error(str(e)) - for d in dirs: - try: - model = Path(root,d) - info = ModelProbe().heuristic_probe(model) - if not info: - continue - dest = Path(dest_dir, info.base_type.value, info.model_type.value, model.name) - copy_dir(model, dest) - except KeyboardInterrupt: - raise - except Exception as e: - logger.error(str(e)) - -def migrate_support_models(dest_directory: Path): - if Path('./models/clipseg').exists(): - copy_dir(Path('./models/clipseg'),dest_directory / 'core/misc/clipseg') - if Path('./models/realesrgan').exists(): - copy_dir(Path('./models/realesrgan'),dest_directory / 'core/upscaling/realesrgan') - for d in ['codeformer','gfpgan']: - path = Path('./models',d) - if path.exists(): - copy_dir(path,dest_directory / f'core/face_restoration/{d}') - -def migrate_conversion_models(dest_directory: Path): - # These are needed for the conversion script - kwargs = dict( - cache_dir = Path('./models/hub'), - #local_files_only = True - ) - try: - logger.info('Migrating core tokenizers 
and text encoders') - target_dir = dest_directory / 'core' / 'convert' - - # bert - bert = BertTokenizerFast.from_pretrained("bert-base-uncased", **kwargs) - bert.save_pretrained(target_dir / 'bert-base-uncased', safe_serialization=True) - - # sd-1 - repo_id = 'openai/clip-vit-large-patch14' - pipeline = CLIPTokenizer.from_pretrained(repo_id, **kwargs) - pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14' / 'tokenizer', safe_serialization=True) - - pipeline = CLIPTextModel.from_pretrained(repo_id, **kwargs) - pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14' / 'text_encoder', safe_serialization=True) - - # sd-2 - repo_id = "stabilityai/stable-diffusion-2" - pipeline = CLIPTokenizer.from_pretrained(repo_id, subfolder="tokenizer", **kwargs) - pipeline.save_pretrained(target_dir / 'stable-diffusion-2-clip' / 'tokenizer', safe_serialization=True) - - pipeline = CLIPTextModel.from_pretrained(repo_id, subfolder="text_encoder", **kwargs) - pipeline.save_pretrained(target_dir / 'stable-diffusion-2-clip' / 'text_encoder', safe_serialization=True) - - # VAE - logger.info('Migrating stable diffusion VAE') - vae = AutoencoderKL.from_pretrained('stabilityai/sd-vae-ft-mse', **kwargs) - vae.save_pretrained(target_dir / 'sd-vae-ft-mse', safe_serialization=True) - - # safety checking - logger.info('Migrating safety checker') - repo_id = "CompVis/stable-diffusion-safety-checker" - pipeline = AutoFeatureExtractor.from_pretrained(repo_id,**kwargs) - pipeline.save_pretrained(target_dir / 'stable-diffusion-safety-checker', safe_serialization=True) - - pipeline = StableDiffusionSafetyChecker.from_pretrained(repo_id,**kwargs) - pipeline.save_pretrained(target_dir / 'stable-diffusion-safety-checker', safe_serialization=True) - except KeyboardInterrupt: - raise - except Exception as e: - logger.error(str(e)) - -def migrate_tuning_models(dest: Path): - for subdir in ['embeddings','loras','controlnets']: - src = Path('.',subdir) - if not src.is_dir(): - logger.info(f'{subdir} directory not found; skipping') - continue - logger.info(f'Scanning {subdir}') - migrate_models(src, dest) - -def write_yaml(model_name: str, path:Path, info:ModelProbeInfo, dest_yaml: io.TextIOBase): - name = unique_name(model_name, info) - stanza = { - f'{info.base_type.value}/{info.model_type.value}/{name}': { - 'name': model_name, - 'path': str(path), - 'description': f'A {info.base_type.value} {info.model_type.value} model', - 'format': 'diffusers', - 'image_size': info.image_size, - 'base': info.base_type.value, - 'variant': info.variant_type.value, - 'prediction_type': info.prediction_type.value, - 'upcast_attention': info.prediction_type == SchedulerPredictionType.VPrediction - } - } - dest_yaml.write(yaml.dump(stanza)) - dest_yaml.flush() - -def migrate_converted(dest_dir: Path, dest_yaml: io.TextIOBase): - for sub_dir in [Path('./models/converted_ckpts'),Path('./models/optimize-ckpts')]: - for model in sub_dir.glob('*'): - if not model.is_dir(): - continue - info = ModelProbe().heuristic_probe(model) - if not info: - continue - dest = Path(dest_dir, info.base_type.value, info.model_type.value, model.name) - try: - copy_dir(model,dest) - rel_path = Path('models',dest.relative_to(dest_dir)) - write_yaml(model.name,path=rel_path,info=info, dest_yaml=dest_yaml) - except KeyboardInterrupt: - raise - except Exception as e: - logger.warning(f'Could not migrate the converted diffusers {model.name}: {str(e)}. 
Skipping.') - -def migrate_pipelines(dest_dir: Path, dest_yaml: io.TextIOBase): - cache = Path('./models/hub') - kwargs = dict( - cache_dir = cache, - safety_checker = None, - # local_files_only = True, - ) - for model in cache.glob('models--*'): - if len(list(model.glob('snapshots/**/model_index.json')))==0: - continue - _,owner,repo_name=model.name.split('--') - repo_id = f'{owner}/{repo_name}' - revisions = [x.name for x in model.glob('refs/*')] - - # if an fp16 is available we use that - revision = 'fp16' if len(revisions) > 1 and 'fp16' in revisions else revisions[0] - logger.info(f'Migrating {repo_id}, revision {revision}') - try: - pipeline = StableDiffusionPipeline.from_pretrained( - repo_id, - revision=revision, - **kwargs) - info = ModelProbe().heuristic_probe(pipeline) - if not info: - continue - dest = Path(dest_dir, info.base_type.value, info.model_type.value, f'{repo_name}') - pipeline.save_pretrained(dest, safe_serialization=True) - rel_path = Path('models',dest.relative_to(dest_dir)) - write_yaml(repo_name, path=rel_path, info=info, dest_yaml=dest_yaml) - except KeyboardInterrupt: - raise - except Exception as e: - logger.warning(f'Could not load the "{revision}" version of {repo_id}. Skipping.') - -def migrate_checkpoints(dest_dir: Path, dest_yaml: io.TextIOBase): - # find any checkpoints referred to in old models.yaml - conf = OmegaConf.load('./configs/models.yaml') - orig_models_dir = Path.cwd() / 'models' - for model_name, stanza in conf.items(): - if stanza.get('format') and stanza['format'] == 'ckpt': - try: - logger.info(f'Migrating checkpoint model {model_name}') - weights = orig_models_dir.parent / stanza['weights'] - config = stanza['config'] - info = ModelProbe().heuristic_probe(weights) - if not info: - continue - - # uh oh, weights is in the old models directory - move it into the new one - if Path(weights).is_relative_to(orig_models_dir): - dest = Path(dest_dir, info.base_type.value, info.model_type.value,weights.name) - copy_file(weights,dest) - weights = Path('models', info.base_type.value, info.model_type.value,weights.name) - model_name = unique_name(model_name, info) - stanza = { - f'{info.base_type.value}/{info.model_type.value}/{model_name}': - { - 'name': model_name, - 'path': str(weights), - 'description': f'{info.base_type.value}-based checkpoint', - 'format': 'checkpoint', - 'image_size': info.image_size, - 'base': info.base_type.value, - 'variant': info.variant_type.value, - 'config': config - } - } - print(yaml.dump(stanza),file=dest_yaml,end="") - dest_yaml.flush() - except KeyboardInterrupt: - raise - except Exception as e: - logger.error(str(e)) - -def main(): - parser = argparse.ArgumentParser(description="Model directory migrator") - parser.add_argument('root_directory', - help='Root directory (containing "models", "embeddings", "controlnets" and "loras")' - ) - parser.add_argument('--dest-directory', - default='./models-3.0', - help='Destination for new models directory', - ) - parser.add_argument('--dest-yaml', - default='./models.yaml-3.0', - help='Destination for new models.yaml file', - ) - args = parser.parse_args() - root_directory = Path(args.root_directory) - assert root_directory.is_dir(), f"{root_directory} is not a valid directory" - assert (root_directory / 'models').is_dir(), f"{root_directory} does not contain a 'models' subdirectory" - - dest_directory = Path(args.dest_directory).resolve() - dest_yaml = Path(args.dest_yaml).resolve() - - os.chdir(root_directory) - with open(dest_yaml,'w') as yaml_file: - 
yaml_file.write(yaml.dump({'__metadata__': - {'version':'3.0.0'} - } - ) - ) - create_directory_structure(dest_directory) - migrate_support_models(dest_directory) - migrate_conversion_models(dest_directory) - migrate_tuning_models(dest_directory) - migrate_converted(dest_directory,yaml_file) - migrate_pipelines(dest_directory,yaml_file) - migrate_checkpoints(dest_directory,yaml_file) - -if __name__ == '__main__': - main() - From d65c833b900e9ff0027b61dee11098a8300b1f0e Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 22 Jun 2023 16:44:55 -0400 Subject: [PATCH 11/47] migration now integrated into invokeai-configure --- .../backend/install/invokeai_configure.py | 88 ++++++++----------- invokeai/backend/install/migrate_to_3.py | 50 ++++++----- 2 files changed, 63 insertions(+), 75 deletions(-) diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py index dc0e18d6fb..3b3c23266e 100755 --- a/invokeai/backend/install/invokeai_configure.py +++ b/invokeai/backend/install/invokeai_configure.py @@ -57,12 +57,11 @@ from invokeai.frontend.install.widgets import ( from invokeai.backend.install.legacy_arg_parsing import legacy_parser from invokeai.backend.install.model_install_backend import ( hf_download_from_pretrained, - hf_download_with_resume, InstallSelections, ModelInstall, ) from invokeai.backend.model_management.model_probe import ( - ModelProbe, ModelType, BaseModelType, SchedulerPredictionType + ModelType, BaseModelType ) warnings.filterwarnings("ignore") @@ -442,46 +441,6 @@ to allow InvokeAI to download restricted styles & subjects from the "Concept Lib begin_entry_at=4, scroll_exit=True, ) - # self.nextrely += 1 - # self.add_widget_intelligent( - # npyscreen.FixedText, - # value="Directories containing textual inversion, controlnet and LoRA models ( autocompletes, ctrl-N advances):", - # editable=False, - # color="CONTROL", - # ) - # self.embedding_dir = self.add_widget_intelligent( - # npyscreen.TitleFilename, - # name=" Textual Inversion Embeddings:", - # value=str(default_embedding_dir()), - # select_dir=True, - # must_exist=False, - # use_two_lines=False, - # labelColor="GOOD", - # begin_entry_at=32, - # scroll_exit=True, - # ) - # self.lora_dir = self.add_widget_intelligent( - # npyscreen.TitleFilename, - # name=" LoRA and LyCORIS:", - # value=str(default_lora_dir()), - # select_dir=True, - # must_exist=False, - # use_two_lines=False, - # labelColor="GOOD", - # begin_entry_at=32, - # scroll_exit=True, - # ) - # self.controlnet_dir = self.add_widget_intelligent( - # npyscreen.TitleFilename, - # name=" ControlNets:", - # value=str(default_controlnet_dir()), - # select_dir=True, - # must_exist=False, - # use_two_lines=False, - # labelColor="GOOD", - # begin_entry_at=32, - # scroll_exit=True, - # ) self.nextrely += 1 self.add_widget_intelligent( npyscreen.TitleFixedText, @@ -756,14 +715,42 @@ def migrate_init_file(legacy_format:Path): new.nsfw_checker = old.safety_checker new.xformers_enabled = old.xformers new.conf_path = old.conf -# new.embedding_dir = old.embedding_path + new.root = legacy_format.parent.resolve() invokeai_yaml = legacy_format.parent / 'invokeai.yaml' with open(invokeai_yaml,"w", encoding="utf-8") as outfile: outfile.write(new.to_yaml()) - legacy_format.replace(legacy_format.parent / 'invokeai.init.old') + legacy_format.replace(legacy_format.parent / 'invokeai.init.orig') +# ------------------------------------- +def migrate_models(root: Path): + from invokeai.backend.install.migrate_to_3 import do_migrate + 
do_migrate(root, root) + +def migrate_if_needed(opt: Namespace, root: Path)->bool: + # We check for to see if the runtime directory is correctly initialized. + old_init_file = root / 'invokeai.init' + new_init_file = root / 'invokeai.yaml' + old_hub = root / 'models/hub' + migration_needed = old_init_file.exists() and not new_init_file.exists() or old_hub.exists() + + if migration_needed: + if opt.yes_to_all or \ + yes_or_no(f'{str(config.root_path)} appears to be a 2.3 format root directory. Convert to version 3.0?'): + + logger.info('** Migrating invokeai.init to invokeai.yaml') + migrate_init_file(old_init_file) + config.parse_args(argv=[],conf=OmegaConf.load(new_init_file)) + + if old_hub.exists(): + migrate_models(config.root_path) + else: + print('Cannot continue without conversion. Aborting.') + + return migration_needed + + # ------------------------------------- def main(): parser = argparse.ArgumentParser(description="InvokeAI model downloader") @@ -829,19 +816,16 @@ def main(): errors = set() try: - # We check for to see if the runtime directory is correctly initialized. - old_init_file = config.root_path / 'invokeai.init' - new_init_file = config.root_path / 'invokeai.yaml' - if old_init_file.exists() and not new_init_file.exists(): - logger.info('** Migrating invokeai.init to invokeai.yaml') - migrate_init_file(old_init_file) - # Load new init file into config - config.parse_args(argv=[],conf=OmegaConf.load(new_init_file)) + # if we do a root migration/upgrade, then we are keeping previous + # configuration and we are done. + if migrate_if_needed(opt, config.root_path): + sys.exit(0) if not config.model_conf_path.exists(): initialize_rootdir(config.root_path, opt.yes_to_all) models_to_download = default_user_selections(opt) + new_init_file = config.root_path / 'invokeai.yaml' if opt.yes_to_all: write_default_options(opt, new_init_file) init_options = Namespace( diff --git a/invokeai/backend/install/migrate_to_3.py b/invokeai/backend/install/migrate_to_3.py index d7c09c4756..7eed3e51d4 100644 --- a/invokeai/backend/install/migrate_to_3.py +++ b/invokeai/backend/install/migrate_to_3.py @@ -408,7 +408,32 @@ def get_legacy_embeddings(root: Path) -> ModelPaths: path = root / 'invokeai.yaml' if path.exists(): return _parse_legacy_yamlfile(root, path) - + +def do_migrate(src_directory: Path, dest_directory: Path): + + dest_models = dest_directory / 'models-3.0' + dest_yaml = dest_directory / 'configs/models.yaml-3.0' + + paths = get_legacy_embeddings(src_directory) + + with open(dest_yaml,'w') as yaml_file: + migrator = MigrateTo3(src_directory, + dest_models, + yaml_file, + src_paths = paths, + ) + migrator.migrate() + + (dest_directory / 'models').replace(dest_directory / 'models.orig') + dest_models.replace(dest_directory / 'models') + + (dest_directory /'configs/models.yaml').replace(dest_directory / 'configs/models.yaml.orig') + dest_yaml.replace(dest_directory / 'configs/models.yaml') + print(f"""Migration successful. +Original models directory moved to {dest_directory}/models.orig +Original models.yaml file moved to {dest_directory}/configs/models.yaml.orig +""") + def main(): parser = argparse.ArgumentParser(prog="invokeai-migrate3", description=""" @@ -446,28 +471,7 @@ script, which will perform a full upgrade in place.""" assert (dest_directory / 'models').is_dir(), f"{dest_directory} does not contain a 'models' subdirectory" assert (dest_directory / 'invokeai.yaml').exists(), f"{dest_directory} does not contain an InvokeAI init file." 
- dest_models = dest_directory / 'models-3.0' - dest_yaml = dest_directory / 'configs/models.yaml-3.0' - - paths = get_legacy_embeddings(root_directory) - - with open(dest_yaml,'w') as yaml_file: - migrator = MigrateTo3(root_directory, - dest_models, - yaml_file, - src_paths = paths, - ) - migrator.migrate() - - (dest_directory / 'models').replace(dest_directory / 'models.orig') - dest_models.replace(dest_directory / 'models') - - (dest_directory /'configs/models.yaml').replace(dest_directory / 'configs/models.yaml.orig') - dest_yaml.replace(dest_directory / 'configs/models.yaml') - print(f"""Migration successful. -Original models directory moved to {dest_directory}/models.orig -Original models.yaml file moved to {dest_directory}/configs/models.yaml.orig -""") + do_migrate(root_directory,dest_directory) if __name__ == '__main__': main() From a9104030037abed7bafe9461b4a1af229d17ab68 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Thu, 22 Jun 2023 21:10:31 -0400 Subject: [PATCH 12/47] correctly migrate models that have relative paths --- invokeai/backend/install/migrate_to_3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/backend/install/migrate_to_3.py b/invokeai/backend/install/migrate_to_3.py index 7eed3e51d4..171c86f7e0 100644 --- a/invokeai/backend/install/migrate_to_3.py +++ b/invokeai/backend/install/migrate_to_3.py @@ -307,7 +307,7 @@ class MigrateTo3(object): # handle relative paths dest_dir = self.dest_models - location = self.src_paths.models / location + location = self.root_directory / location info = ModelProbe().heuristic_probe(location) if not info: From 56bd873d7a8e94c0689b250da4d0d410069efe49 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 23 Jun 2023 10:52:59 -0400 Subject: [PATCH 13/47] make relative model paths work in model manager --- invokeai/backend/model_management/model_manager.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 4e2c789c07..c7fb7569db 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -384,7 +384,8 @@ class ModelManager(object): # if it known model check that target path exists (if manualy deleted) else: # logic repeated twice(in rescan too) any way to optimize? - if not os.path.exists(self.models[model_key].path): + model_path = self.globals.root_path / self.models[model_key].path + if not model_path.exists(): if model_class.save_to_config: self.models[model_key].error = ModelError.NotFound raise Exception(f"Files for model \"{model_key}\" not found") @@ -395,13 +396,13 @@ class ModelManager(object): # reset model errors? 
- - model_config = self.models[model_key] # /models/{base_model}/{model_type}/{name}.ckpt or .safentesors # /models/{base_model}/{model_type}/{name}/ - model_path = model_config.path + # massage relative paths into absolute ones + model_path = model_path or self.globals.root_path / model_config.path + model_config.path = model_path # vae/movq override # TODO: @@ -415,7 +416,7 @@ class ModelManager(object): # TODO: path # TODO: is it accurate to use path as id - dst_convert_path = self.globals.models_dir / ".cache" / hashlib.md5(model_path.encode()).hexdigest() + dst_convert_path = self.globals.models_dir / ".cache" / hashlib.md5(str(model_path).encode()).hexdigest() model_path = model_class.convert_if_required( base_model=base_model, model_path=model_path, From 3043af46204907dd05f0057f423c6f19afffdbc7 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 23 Jun 2023 13:56:30 -0400 Subject: [PATCH 14/47] implement vae passthru --- invokeai/backend/install/migrate_to_3.py | 169 ++++++++++++++++++----- 1 file changed, 136 insertions(+), 33 deletions(-) diff --git a/invokeai/backend/install/migrate_to_3.py b/invokeai/backend/install/migrate_to_3.py index 171c86f7e0..5e9a194125 100644 --- a/invokeai/backend/install/migrate_to_3.py +++ b/invokeai/backend/install/migrate_to_3.py @@ -15,7 +15,9 @@ import warnings from dataclasses import dataclass from pathlib import Path -from omegaconf import OmegaConf +from omegaconf import OmegaConf, DictConfig +from typing import Union + from diffusers import StableDiffusionPipeline, AutoencoderKL from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from transformers import ( @@ -104,6 +106,9 @@ class MigrateTo3(object): ''' copy a single file with logging ''' + if dest.exists(): + logger.info(f'Skipping existing {str(dest)}') + return logger.info(f'Copying {str(src)} to {str(dest)}') try: shutil.copy(src, dest) @@ -115,6 +120,10 @@ class MigrateTo3(object): ''' Recursively copy a directory with logging ''' + if dest.exists(): + logger.info(f'Skipping existing {str(dest)}') + return + logger.info(f'Copying {str(src)} to {str(dest)}') try: shutil.copytree(src, dest) @@ -127,7 +136,6 @@ class MigrateTo3(object): that looks like a model, and copy the model into the appropriate location within the destination models directory. 
''' - dest_dir = self.dest_models for root, dirs, files in os.walk(src_dir): for f in files: # hack - don't copy raw learned_embeds.bin, let them @@ -139,7 +147,7 @@ class MigrateTo3(object): info = ModelProbe().heuristic_probe(model) if not info: continue - dest = Path(dest_dir, info.base_type.value, info.model_type.value, f) + dest = self._model_probe_to_path(info) / f self.copy_file(model, dest) except KeyboardInterrupt: raise @@ -151,14 +159,13 @@ class MigrateTo3(object): info = ModelProbe().heuristic_probe(model) if not info: continue - dest = Path(dest_dir, info.base_type.value, info.model_type.value, model.name) + dest = self._model_probe_to_path(info) / model.name self.copy_dir(model, dest) except KeyboardInterrupt: raise except Exception as e: logger.error(str(e)) - # TO DO: Rewrite this to support alternate locations for esrgan and gfpgan in init file def migrate_support_models(self): ''' Copy the clipseg, upscaler, and restoration models to their new @@ -203,39 +210,53 @@ class MigrateTo3(object): logger.info('Migrating core tokenizers and text encoders') target_dir = dest_directory / 'core' / 'convert' - # bert - bert = BertTokenizerFast.from_pretrained("bert-base-uncased", **kwargs) - bert.save_pretrained(target_dir / 'bert-base-uncased', safe_serialization=True) + self._migrate_pretrained(BertTokenizerFast, + repo_id='bert-base-uncased', + dest = target_dir / 'bert-base-uncased', + **kwargs) # sd-1 repo_id = 'openai/clip-vit-large-patch14' - pipeline = CLIPTokenizer.from_pretrained(repo_id, **kwargs) - pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14' / 'tokenizer', safe_serialization=True) - - pipeline = CLIPTextModel.from_pretrained(repo_id, **kwargs) - pipeline.save_pretrained(target_dir / 'clip-vit-large-patch14' / 'text_encoder', safe_serialization=True) + self._migrate_pretrained(CLIPTokenizer, + repo_id= repo_id, + dest= target_dir / 'clip-vit-large-patch14' / 'tokenizer', + **kwargs) + self._migrate_pretrained(CLIPTextModel, + repo_id = repo_id, + dest = target_dir / 'clip-vit-large-patch14' / 'text_encoder', + **kwargs) # sd-2 repo_id = "stabilityai/stable-diffusion-2" - pipeline = CLIPTokenizer.from_pretrained(repo_id, subfolder="tokenizer", **kwargs) - pipeline.save_pretrained(target_dir / 'stable-diffusion-2-clip' / 'tokenizer', safe_serialization=True) - - pipeline = CLIPTextModel.from_pretrained(repo_id, subfolder="text_encoder", **kwargs) - pipeline.save_pretrained(target_dir / 'stable-diffusion-2-clip' / 'text_encoder', safe_serialization=True) + self._migrate_pretrained(CLIPTokenizer, + repo_id = repo_id, + dest = target_dir / 'stable-diffusion-2-clip' / 'tokenizer', + **{'subfolder':'tokenizer',**kwargs} + ) + self._migrate_pretrained(CLIPTextModel, + repo_id = repo_id, + dest = target_dir / 'stable-diffusion-2-clip' / 'text_encoder', + **{'subfolder':'text_encoder',**kwargs} + ) # VAE logger.info('Migrating stable diffusion VAE') - vae = AutoencoderKL.from_pretrained('stabilityai/sd-vae-ft-mse', **kwargs) - vae.save_pretrained(target_dir / 'sd-vae-ft-mse', safe_serialization=True) - + self._migrate_pretrained(AutoencoderKL, + repo_id = 'stabilityai/sd-vae-ft-mse', + dest = target_dir / 'sd-vae-ft-mse', + **kwargs) + # safety checking logger.info('Migrating safety checker') repo_id = "CompVis/stable-diffusion-safety-checker" - pipeline = AutoFeatureExtractor.from_pretrained(repo_id,**kwargs) - pipeline.save_pretrained(target_dir / 'stable-diffusion-safety-checker', safe_serialization=True) - - pipeline = 
StableDiffusionSafetyChecker.from_pretrained(repo_id,**kwargs) - pipeline.save_pretrained(target_dir / 'stable-diffusion-safety-checker', safe_serialization=True) + self._migrate_pretrained(AutoFeatureExtractor, + repo_id = repo_id, + dest = target_dir / 'stable-diffusion-safety-checker', + **kwargs) + self._migrate_pretrained(StableDiffusionSafetyChecker, + repo_id = repo_id, + dest = target_dir / 'stable-diffusion-safety-checker', + **kwargs) except KeyboardInterrupt: raise except Exception as e: @@ -262,8 +283,72 @@ class MigrateTo3(object): } self.dest_yaml.write(yaml.dump(stanza)) self.dest_yaml.flush() + + def _model_probe_to_path(self, info: ModelProbeInfo)->Path: + return Path(self.dest_models, info.base_type.value, info.model_type.value) - def migrate_repo_id(self, repo_id: str, model_name :str=None): + def _migrate_pretrained(self, model_class, repo_id: str, dest: Path, **kwargs): + if dest.exists(): + logger.info(f'Skipping existing {dest}') + return + model = model_class.from_pretrained(repo_id, **kwargs) + self._save_pretrained(model, dest) + + def _save_pretrained(self, model, dest: Path): + if dest.exists(): + logger.info(f'Skipping existing {dest}') + return + model_name = dest.name + download_path = dest.with_name(f'{model_name}.downloading') + model.save_pretrained(download_path, safe_serialization=True) + download_path.replace(dest) + + def _download_vae(self, repo_id: str, subfolder:str=None)->Path: + vae = AutoencoderKL.from_pretrained(repo_id, cache_dir=self.root_directory / 'models/hub', subfolder=subfolder) + info = ModelProbe().heuristic_probe(vae) + _, model_name = repo_id.split('/') + dest = self._model_probe_to_path(info) / self.unique_name(model_name, info) + vae.save_pretrained(dest, safe_serialization=True) + return dest + + def _vae_path(self, vae: Union[str,dict])->Path: + ''' + Convert 2.3 VAE stanza to a straight path. + ''' + vae_path = None + + # First get a path + if isinstance(vae,str): + vae_path = vae + + elif isinstance(vae,DictConfig): + if p := vae.get('path'): + vae_path = p + elif repo_id := vae.get('repo_id'): + if repo_id=='stabilityai/sd-vae-ft-mse': # this guy is already downloaded + vae_path = 'models/core/convert/se-vae-ft-mse' + else: + vae_path = self._download_vae(repo_id, vae.get('subfolder')) + + assert vae_path is not None, "Couldn't find VAE for this model" + + # if the VAE is in the old models directory, then we must move it into the new + # one. VAEs outside of this directory can stay where they are. 
+ vae_path = Path(vae_path) + if vae_path.is_relative_to(self.src_paths.models): + info = ModelProbe().heuristic_probe(vae_path) + dest = self._model_probe_to_path(info) / vae_path.name + if not dest.exists(): + self.copy_dir(vae_path,dest) + vae_path = dest + + if vae_path.is_relative_to(self.dest_models): + rel_path = vae_path.relative_to(self.dest_models) + return Path('models',rel_path) + else: + return vae_path + + def migrate_repo_id(self, repo_id: str, model_name :str=None, **extra_config): ''' Migrate a locally-cached diffusers pipeline identified with a repo_id ''' @@ -295,10 +380,11 @@ class MigrateTo3(object): if not info: return - dest = Path(dest_dir, info.base_type.value, info.model_type.value, f'{repo_name}') - pipeline.save_pretrained(dest, safe_serialization=True) + dest = self._model_probe_to_path(info) / repo_name + self._save_pretrained(pipeline, dest) + rel_path = Path('models',dest.relative_to(dest_dir)) - self.write_yaml(model_name, path=rel_path, info=info) + self.write_yaml(model_name, path=rel_path, info=info, **extra_config) def migrate_path(self, location: Path, model_name: str=None, **extra_config): ''' @@ -332,16 +418,29 @@ class MigrateTo3(object): for model_name, stanza in conf.items(): try: + passthru_args = {} + + if vae := stanza.get('vae'): + try: + passthru_args['vae'] = str(self._vae_path(vae)) + except Exception as e: + logger.warning(f'Could not find a VAE matching "{vae}" for model "{model_name}"') + logger.warning(str(e)) + + if config := stanza.get('config'): + passthru_args['config'] = config + if repo_id := stanza.get('repo_id'): logger.info(f'Migrating diffusers model {model_name}') - self.migrate_repo_id(repo_id, model_name) + self.migrate_repo_id(repo_id, model_name, **passthru_args) elif location := stanza.get('weights'): logger.info(f'Migrating checkpoint model {model_name}') - self.migrate_path(Path(location), model_name, config=stanza.get('config')) + self.migrate_path(Path(location), model_name, **passthru_args) + elif location := stanza.get('path'): logger.info(f'Migrating diffusers model {model_name}') - self.migrate_path(Path(location), model_name, config=stanza.get('config')) + self.migrate_path(Path(location), model_name, **passthru_args) except KeyboardInterrupt: raise @@ -424,6 +523,7 @@ def do_migrate(src_directory: Path, dest_directory: Path): ) migrator.migrate() + shutil.rmtree(dest_directory / 'models.orig', ignore_errors=True) (dest_directory / 'models').replace(dest_directory / 'models.orig') dest_models.replace(dest_directory / 'models') @@ -456,6 +556,7 @@ script, which will perform a full upgrade in place.""" required=True, help='Destination InvokeAI 3.0 directory (containing "invokeai.yaml")' ) +# TO DO: Implement full directory scanning # parser.add_argument('--all-models', # action="store_true", # help='Migrate all models found in `models` directory, not just those mentioned in models.yaml', @@ -476,3 +577,5 @@ script, which will perform a full upgrade in place.""" if __name__ == '__main__': main() + + From 54b74427f4376716c3b30d5f4ee3aa3fd0c2daa4 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 23 Jun 2023 14:13:37 -0400 Subject: [PATCH 15/47] adjust for change in list_models() API --- .../backend/install/model_install_backend.py | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index a26ec0ef72..2955f72252 100644 --- a/invokeai/backend/install/model_install_backend.py +++ 
b/invokeai/backend/install/model_install_backend.py @@ -121,21 +121,21 @@ class ModelInstall(object): # supplement with entries in models.yaml installed_models = self.mgr.list_models() - for base in installed_models.keys(): - for model_type in installed_models[base].keys(): - for name, value in installed_models[base][model_type].items(): - key = ModelManager.create_key(name, base, model_type) - if key in model_dict: - model_dict[key].installed = True - else: - model_dict[key] = ModelLoadInfo( - name = name, - base_type = base, - model_type = model_type, -# description = value.get('description'), - path = value.get('path'), - installed = True, - ) + for md in installed_models: + base = md['base_model'] + model_type = md['type'] + name = md['name'] + key = ModelManager.create_key(name, base, model_type) + if key in model_dict: + model_dict[key].installed = True + else: + model_dict[key] = ModelLoadInfo( + name = name, + base_type = base, + model_type = model_type, + path = value.get('path'), + installed = True, + ) return {x : model_dict[x] for x in sorted(model_dict.keys(),key=lambda y: model_dict[y].name.lower())} def starter_models(self)->Set[str]: @@ -316,7 +316,7 @@ class ModelInstall(object): attributes = dict( path = str(path), description = str(description), - format = info.format, + model_format = info.format, ) if info.model_type == ModelType.Pipeline: attributes.update( From 466ec3ab5eb3629f08dd6236c07cba35ee61c498 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 23 Jun 2023 16:35:39 -0400 Subject: [PATCH 16/47] add router API support for model manager heuristic_import()` --- invokeai/app/api/routers/models.py | 31 +++++++++++-- .../backend/install/model_install_backend.py | 3 +- .../backend/model_management/model_manager.py | 43 ++++++++++++++++--- .../backend/model_management/model_probe.py | 14 +++--- .../backend/model_management/models/base.py | 1 - invokeai/configs/INITIAL_MODELS.yaml | 1 + 6 files changed, 74 insertions(+), 19 deletions(-) diff --git a/invokeai/app/api/routers/models.py b/invokeai/app/api/routers/models.py index 50d645eb57..0b03c8e729 100644 --- a/invokeai/app/api/routers/models.py +++ b/invokeai/app/api/routers/models.py @@ -1,13 +1,13 @@ # Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) and 2023 Kent Keirsey (https://github.com/hipsterusername) -from typing import Annotated, Literal, Optional, Union, Dict +from typing import Literal, Optional, Union from fastapi import Query from fastapi.routing import APIRouter, HTTPException from pydantic import BaseModel, Field, parse_obj_as from ..dependencies import ApiDependencies from invokeai.backend import BaseModelType, ModelType -from invokeai.backend.model_management.models import OPENAPI_MODEL_CONFIGS +from invokeai.backend.model_management.models import OPENAPI_MODEL_CONFIGS, SchedulerPredictionType MODEL_CONFIGS = Union[tuple(OPENAPI_MODEL_CONFIGS)] models_router = APIRouter(prefix="/v1/models", tags=["models"]) @@ -51,11 +51,14 @@ class CreateModelResponse(BaseModel): info: Union[CkptModelInfo, DiffusersModelInfo] = Field(discriminator="format", description="The model info") status: str = Field(description="The status of the API response") +class ImportModelRequest(BaseModel): + name: str = Field(description="A model path, repo_id or URL to import") + prediction_type: Optional[Literal['epsilon','v_prediction','sample']] = Field(description='Prediction type for SDv2 checkpoint files') + class ConversionRequest(BaseModel): name: str = Field(description="The name of the new model") 
info: CkptModelInfo = Field(description="The converted model info") save_location: str = Field(description="The path to save the converted model weights") - class ConvertedModelResponse(BaseModel): name: str = Field(description="The name of the new model") @@ -105,6 +108,28 @@ async def update_model( return model_response +@models_router.post( + "/", + operation_id="import_model", + responses={200: {"status": "success"}}, +) +async def import_model( + model_request: ImportModelRequest +) -> None: + """ Add Model """ + items_to_import = set([model_request.name]) + prediction_types = { x.value: x for x in SchedulerPredictionType } + logger = ApiDependencies.invoker.services.logger + + installed_models = ApiDependencies.invoker.services.model_manager.heuristic_import( + items_to_import = items_to_import, + prediction_type_helper = lambda x: prediction_types.get(model_request.prediction_type) + ) + if len(installed_models) > 0: + logger.info(f'Successfully imported {model_request.name}') + else: + logger.error(f'Model {model_request.name} not imported') + raise HTTPException(status_code=500, detail=f'Model {model_request.name} not imported') @models_router.delete( "/{model_name}", diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index 2955f72252..d37f67ba91 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -93,9 +93,10 @@ class ModelInstall(object): def __init__(self, config:InvokeAIAppConfig, prediction_type_helper: Callable[[Path],SchedulerPredictionType]=None, + model_manager: ModelManager = None, access_token:str = None): self.config = config - self.mgr = ModelManager(config.model_conf_path) + self.mgr = model_manager or ModelManager(config.model_conf_path) self.datasets = OmegaConf.load(Dataset_path) self.prediction_helper = prediction_type_helper self.access_token = access_token or HfFolder.get_token() diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index c7fb7569db..eafc283b1d 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -151,13 +151,11 @@ import os import hashlib import textwrap from dataclasses import dataclass -from packaging import version from pathlib import Path -from typing import Dict, Optional, List, Tuple, Union, types +from typing import Optional, List, Tuple, Union, Set, Callable, types from shutil import rmtree import torch -from huggingface_hub import scan_cache_dir from omegaconf import OmegaConf from omegaconf.dictconfig import DictConfig @@ -165,9 +163,13 @@ from pydantic import BaseModel import invokeai.backend.util.logging as logger from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.backend.util import CUDA_DEVICE, download_with_resume +from invokeai.backend.util import CUDA_DEVICE from .model_cache import ModelCache, ModelLocker -from .models import BaseModelType, ModelType, SubModelType, ModelError, MODEL_CLASSES +from .models import ( + BaseModelType, ModelType, SubModelType, + ModelError, SchedulerPredictionType, MODEL_CLASSES, + ModelConfigBase, + ) # We are only starting to number the config file with release 3. 
# The config file version doesn't have to start at release version, but it will help @@ -686,3 +688,34 @@ class ModelManager(object): if new_models_found and self.config_path: self.commit() + + + def heuristic_import(self, + items_to_import: Set[str], + prediction_type_helper: Callable[[Path],SchedulerPredictionType]=None, + )->Set[str]: + ''' + Import a list of paths, repo_ids or URLs. Returns the + set of successfully imported items. The prediction_type_helper + is a callback that receives the Path of a checkpoint or diffusers + model and returns a SchedulerPredictionType (or None). + ''' + # avoid circular import here + from invokeai.backend.install.model_install_backend import ModelInstall + successfully_installed = set() + + installer = ModelInstall(config = self.globals, + prediction_type_helper = prediction_type_helper, + model_manager = self) + for thing in items_to_import: + try: + installer.heuristic_install(thing) + successfully_installed.add(thing) + except Exception as e: + self.logger.warning(f'{thing} could not be imported: {str(e)}') + + self.commit() + return successfully_installed + + + diff --git a/invokeai/backend/model_management/model_probe.py b/invokeai/backend/model_management/model_probe.py index ec3146a00e..9aeb829a25 100644 --- a/invokeai/backend/model_management/model_probe.py +++ b/invokeai/backend/model_management/model_probe.py @@ -1,17 +1,14 @@ import json -import traceback import torch import safetensors.torch from dataclasses import dataclass -from enum import Enum -from diffusers import ModelMixin, ConfigMixin, StableDiffusionPipeline, AutoencoderKL, ControlNetModel +from diffusers import ModelMixin, ConfigMixin from pathlib import Path from typing import Callable, Literal, Union, Dict from picklescan.scanner import scan_file_path -import invokeai.backend.util.logging as logger from .models import BaseModelType, ModelType, ModelVariantType, SchedulerPredictionType, SilenceWarnings @dataclass @@ -102,7 +99,7 @@ class ModelProbe(object): and prediction_type==SchedulerPredictionType.VPrediction \ ) else 512, ) - except Exception as e: + except Exception: return None return model_info @@ -115,6 +112,9 @@ class ModelProbe(object): return ModelType.TextualInversion checkpoint = checkpoint or cls._scan_and_load_checkpoint(model_path) state_dict = checkpoint.get("state_dict") or checkpoint + + if len(checkpoint) < 10 and all(isinstance(v, torch.Tensor) for v in checkpoint.values()): + return ModelType.TextualInversion if any([x.startswith("model.diffusion_model") for x in state_dict.keys()]): return ModelType.Pipeline if any([x.startswith("encoder.conv_in") for x in state_dict.keys()]): @@ -326,13 +326,9 @@ class PipelineFolderProbe(FolderProbeBase): def get_base_type(self)->BaseModelType: if self.model: unet_conf = self.model.unet.config - scheduler_conf = self.model.scheduler.config else: with open(self.folder_path / 'unet' / 'config.json','r') as file: unet_conf = json.load(file) - with open(self.folder_path / 'scheduler' / 'scheduler_config.json','r') as file: - scheduler_conf = json.load(file) - if unet_conf['cross_attention_dim'] == 768: return BaseModelType.StableDiffusion1 elif unet_conf['cross_attention_dim'] == 1024: diff --git a/invokeai/backend/model_management/models/base.py b/invokeai/backend/model_management/models/base.py index ef354ecc07..0b36cd4235 100644 --- a/invokeai/backend/model_management/models/base.py +++ b/invokeai/backend/model_management/models/base.py @@ -56,7 +56,6 @@ class ModelConfigBase(BaseModel): class Config: use_enum_values = 
True - class EmptyConfigLoader(ConfigMixin): @classmethod def load_config(cls, *args, **kwargs): diff --git a/invokeai/configs/INITIAL_MODELS.yaml b/invokeai/configs/INITIAL_MODELS.yaml index e2288c43fd..9ff9efc3f5 100644 --- a/invokeai/configs/INITIAL_MODELS.yaml +++ b/invokeai/configs/INITIAL_MODELS.yaml @@ -45,6 +45,7 @@ sd-1/pipeline/portraitplus: repo_id: wavymulder/portraitplus recommended: False sd-1/pipeline/seek.art_MEGA: + repo_id: coreco/seek.art_MEGA description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB) recommended: False sd-1/pipeline/trinart_stable_diffusion_v2: From 539d1f3bdec07346f4a07edf7d14d084e9967ac0 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Fri, 23 Jun 2023 16:54:52 -0400 Subject: [PATCH 17/47] remove redundant prediction_type and attention_upscaling flags --- .../backend/install/invokeai_configure.py | 4 ++-- .../backend/install/model_install_backend.py | 22 +++---------------- .../models/stable_diffusion.py | 7 +----- 3 files changed, 6 insertions(+), 27 deletions(-) diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py index 3b3c23266e..48ad76a0a0 100755 --- a/invokeai/backend/install/invokeai_configure.py +++ b/invokeai/backend/install/invokeai_configure.py @@ -631,8 +631,8 @@ def initialize_rootdir(root: Path, yes_to_all: bool = False): } ) ) - with open(root / 'invokeai.yaml','w') as f: - f.write('#empty invokeai.yaml initialization file') +# with open(root / 'invokeai.yaml','w') as f: +# f.write('#empty invokeai.yaml initialization file') # ------------------------------------- def run_console_ui( diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index d37f67ba91..457f8b593f 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -3,8 +3,6 @@ Utility (backend) functions used by model_install.py """ import os import shutil -import sys -import traceback import warnings from dataclasses import dataclass,field from pathlib import Path @@ -12,10 +10,9 @@ from tempfile import TemporaryDirectory from typing import List, Dict, Callable, Union, Set import requests -from diffusers import AutoencoderKL, StableDiffusionPipeline +from diffusers import StableDiffusionPipeline from huggingface_hub import hf_hub_url, HfFolder, HfApi from omegaconf import OmegaConf -from omegaconf.dictconfig import DictConfig from tqdm import tqdm import invokeai.configs as configs @@ -24,7 +21,6 @@ from invokeai.app.services.config import InvokeAIAppConfig from invokeai.backend.model_management import ModelManager, ModelType, BaseModelType, ModelVariantType from invokeai.backend.model_management.model_probe import ModelProbe, SchedulerPredictionType, ModelProbeInfo from invokeai.backend.util import download_with_resume -from ..stable_diffusion import StableDiffusionGeneratorPipeline from ..util.logging import InvokeAILogger warnings.filterwarnings("ignore") @@ -290,7 +286,7 @@ class ModelInstall(object): location = self._download_hf_model(repo_id, files, staging) break elif f'learned_embeds.{suffix}' in files: - location = self._download_hf_model(repo_id, [f'learned_embeds.suffix'], staging) + location = self._download_hf_model(repo_id, ['learned_embeds.suffix'], staging) break if not location: logger.warning(f'Could not determine type of repo {repo_id}. 
Skipping install.') @@ -307,7 +303,6 @@ class ModelInstall(object): self._install_path(dest, info) def _make_attributes(self, path: Path, info: ModelProbeInfo)->dict: - # convoluted way to retrieve the description from datasets description = f'{info.base_type.value} {info.model_type.value} model' if key := self.reverse_paths.get(self.current_id): @@ -320,18 +315,7 @@ class ModelInstall(object): model_format = info.format, ) if info.model_type == ModelType.Pipeline: - attributes.update( - dict( - variant = info.variant_type, - ) - ) - if info.base_type == BaseModelType.StableDiffusion2: - attributes.update( - dict( - prediction_type = info.prediction_type, - upcast_attention = info.prediction_type == SchedulerPredictionType.VPrediction, - ) - ) + attributes.update(dict(variant = info.variant_type,)) if info.format=="checkpoint": try: legacy_conf = LEGACY_CONFIGS[info.base_type][info.variant_type][info.prediction_type] if info.base_type == BaseModelType.StableDiffusion2 \ diff --git a/invokeai/backend/model_management/models/stable_diffusion.py b/invokeai/backend/model_management/models/stable_diffusion.py index f169326571..aa7d05b766 100644 --- a/invokeai/backend/model_management/models/stable_diffusion.py +++ b/invokeai/backend/model_management/models/stable_diffusion.py @@ -131,17 +131,12 @@ class StableDiffusion2Model(DiffusersModel): model_format: Literal[StableDiffusion2ModelFormat.Diffusers] vae: Optional[str] = Field(None) variant: ModelVariantType - prediction_type: SchedulerPredictionType - upcast_attention: bool class CheckpointConfig(ModelConfigBase): model_format: Literal[StableDiffusion2ModelFormat.Checkpoint] vae: Optional[str] = Field(None) - config: Optional[str] = Field(None) + config: str variant: ModelVariantType - prediction_type: SchedulerPredictionType - upcast_attention: bool - def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType): assert base_model == BaseModelType.StableDiffusion2 From ba1371a88fddc0d4b34ccd736f7e0edc2dd56dfe Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 24 Jun 2023 11:45:49 -0400 Subject: [PATCH 18/47] rename ModelType.Pipeline to ModelType.Main --- invokeai/app/invocations/model.py | 2 +- .../backend/install/invokeai_configure.py | 2 +- invokeai/backend/install/migrate_to_3.py | 2 +- .../backend/install/model_install_backend.py | 6 ++-- .../backend/model_management/model_probe.py | 10 +++---- .../model_management/models/__init__.py | 6 ++-- .../backend/model_management/models/base.py | 2 +- .../models/stable_diffusion.py | 8 +++--- invokeai/configs/INITIAL_MODELS.yaml | 28 +++++++++---------- invokeai/frontend/install/model_install.py | 4 +-- 10 files changed, 35 insertions(+), 35 deletions(-) diff --git a/invokeai/app/invocations/model.py b/invokeai/app/invocations/model.py index b77aa5dafd..7490414bce 100644 --- a/invokeai/app/invocations/model.py +++ b/invokeai/app/invocations/model.py @@ -73,7 +73,7 @@ class PipelineModelLoaderInvocation(BaseInvocation): base_model = self.model.base_model model_name = self.model.model_name - model_type = ModelType.Pipeline + model_type = ModelType.Main # TODO: not found exceptions if not context.services.model_manager.model_exists( diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py index 48ad76a0a0..5f9612fcea 100755 --- a/invokeai/backend/install/invokeai_configure.py +++ b/invokeai/backend/install/invokeai_configure.py @@ -618,7 +618,7 @@ def initialize_rootdir(root: Path, yes_to_all: bool = False): dest = 
root / 'models' for model_base in [BaseModelType.StableDiffusion1,BaseModelType.StableDiffusion2]: - for model_type in [ModelType.Pipeline, ModelType.Vae, ModelType.Lora, + for model_type in [ModelType.Main, ModelType.Vae, ModelType.Lora, ModelType.ControlNet,ModelType.TextualInversion]: path = dest / model_base.value / model_type.value path.mkdir(parents=True, exist_ok=True) diff --git a/invokeai/backend/install/migrate_to_3.py b/invokeai/backend/install/migrate_to_3.py index 5e9a194125..77a476e5c5 100644 --- a/invokeai/backend/install/migrate_to_3.py +++ b/invokeai/backend/install/migrate_to_3.py @@ -94,7 +94,7 @@ class MigrateTo3(object): Create the basic directory structure for the models folder. ''' for model_base in [BaseModelType.StableDiffusion1,BaseModelType.StableDiffusion2]: - for model_type in [ModelType.Pipeline, ModelType.Vae, ModelType.Lora, + for model_type in [ModelType.Main, ModelType.Vae, ModelType.Lora, ModelType.ControlNet,ModelType.TextualInversion]: path = self.dest_models / model_base.value / model_type.value path.mkdir(parents=True, exist_ok=True) diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index 457f8b593f..1e50c0c9b6 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -139,7 +139,7 @@ class ModelInstall(object): models = set() for key, value in self.datasets.items(): name,base,model_type = ModelManager.parse_key(key) - if model_type==ModelType.Pipeline: + if model_type==ModelType.Main: models.add(key) return models @@ -224,7 +224,7 @@ class ModelInstall(object): try: logger.info(f'Probing {path}') info = info or ModelProbe().heuristic_probe(path,self.prediction_helper) - if info.model_type == ModelType.Pipeline: + if info.model_type == ModelType.Main: model_name = path.stem if info.format=='checkpoint' else path.name if self.mgr.model_exists(model_name, info.base_type, info.model_type): raise Exception(f'A model named "{model_name}" is already installed.') @@ -314,7 +314,7 @@ class ModelInstall(object): description = str(description), model_format = info.format, ) - if info.model_type == ModelType.Pipeline: + if info.model_type == ModelType.Main: attributes.update(dict(variant = info.variant_type,)) if info.format=="checkpoint": try: diff --git a/invokeai/backend/model_management/model_probe.py b/invokeai/backend/model_management/model_probe.py index 9aeb829a25..17c76250b9 100644 --- a/invokeai/backend/model_management/model_probe.py +++ b/invokeai/backend/model_management/model_probe.py @@ -33,7 +33,7 @@ class ModelProbe(object): } CLASS2TYPE = { - 'StableDiffusionPipeline' : ModelType.Pipeline, + 'StableDiffusionPipeline' : ModelType.Main, 'AutoencoderKL' : ModelType.Vae, 'ControlNetModel' : ModelType.ControlNet, } @@ -116,7 +116,7 @@ class ModelProbe(object): if len(checkpoint) < 10 and all(isinstance(v, torch.Tensor) for v in checkpoint.values()): return ModelType.TextualInversion if any([x.startswith("model.diffusion_model") for x in state_dict.keys()]): - return ModelType.Pipeline + return ModelType.Main if any([x.startswith("encoder.conv_in") for x in state_dict.keys()]): return ModelType.Vae if "string_to_token" in state_dict or "emb_params" in state_dict: @@ -207,7 +207,7 @@ class CheckpointProbeBase(ProbeBase): def get_variant_type(self)-> ModelVariantType: model_type = ModelProbe.get_model_type_from_checkpoint(self.checkpoint_path,self.checkpoint) - if model_type != ModelType.Pipeline: + if model_type != 
ModelType.Main: return ModelVariantType.Normal state_dict = self.checkpoint.get('state_dict') or self.checkpoint in_channels = state_dict[ @@ -409,12 +409,12 @@ class LoRAFolderProbe(FolderProbeBase): return LoRACheckpointProbe(model_file,None).get_base_type() ############## register probe classes ###### -ModelProbe.register_probe('diffusers', ModelType.Pipeline, PipelineFolderProbe) +ModelProbe.register_probe('diffusers', ModelType.Main, PipelineFolderProbe) ModelProbe.register_probe('diffusers', ModelType.Vae, VaeFolderProbe) ModelProbe.register_probe('diffusers', ModelType.Lora, LoRAFolderProbe) ModelProbe.register_probe('diffusers', ModelType.TextualInversion, TextualInversionFolderProbe) ModelProbe.register_probe('diffusers', ModelType.ControlNet, ControlNetFolderProbe) -ModelProbe.register_probe('checkpoint', ModelType.Pipeline, PipelineCheckpointProbe) +ModelProbe.register_probe('checkpoint', ModelType.Main, PipelineCheckpointProbe) ModelProbe.register_probe('checkpoint', ModelType.Vae, VaeCheckpointProbe) ModelProbe.register_probe('checkpoint', ModelType.Lora, LoRACheckpointProbe) ModelProbe.register_probe('checkpoint', ModelType.TextualInversion, TextualInversionCheckpointProbe) diff --git a/invokeai/backend/model_management/models/__init__.py b/invokeai/backend/model_management/models/__init__.py index 6975d45f93..87b0ad3c4e 100644 --- a/invokeai/backend/model_management/models/__init__.py +++ b/invokeai/backend/model_management/models/__init__.py @@ -11,21 +11,21 @@ from .textual_inversion import TextualInversionModel MODEL_CLASSES = { BaseModelType.StableDiffusion1: { - ModelType.Pipeline: StableDiffusion1Model, + ModelType.Main: StableDiffusion1Model, ModelType.Vae: VaeModel, ModelType.Lora: LoRAModel, ModelType.ControlNet: ControlNetModel, ModelType.TextualInversion: TextualInversionModel, }, BaseModelType.StableDiffusion2: { - ModelType.Pipeline: StableDiffusion2Model, + ModelType.Main: StableDiffusion2Model, ModelType.Vae: VaeModel, ModelType.Lora: LoRAModel, ModelType.ControlNet: ControlNetModel, ModelType.TextualInversion: TextualInversionModel, }, #BaseModelType.Kandinsky2_1: { - # ModelType.Pipeline: Kandinsky2_1Model, + # ModelType.Main: Kandinsky2_1Model, # ModelType.MoVQ: MoVQModel, # ModelType.Lora: LoRAModel, # ModelType.ControlNet: ControlNetModel, diff --git a/invokeai/backend/model_management/models/base.py b/invokeai/backend/model_management/models/base.py index 0b36cd4235..beaac40109 100644 --- a/invokeai/backend/model_management/models/base.py +++ b/invokeai/backend/model_management/models/base.py @@ -18,7 +18,7 @@ class BaseModelType(str, Enum): #Kandinsky2_1 = "kandinsky-2.1" class ModelType(str, Enum): - Pipeline = "pipeline" + Main = "main" Vae = "vae" Lora = "lora" ControlNet = "controlnet" # used by model_probe diff --git a/invokeai/backend/model_management/models/stable_diffusion.py b/invokeai/backend/model_management/models/stable_diffusion.py index aa7d05b766..a269ae12e2 100644 --- a/invokeai/backend/model_management/models/stable_diffusion.py +++ b/invokeai/backend/model_management/models/stable_diffusion.py @@ -40,11 +40,11 @@ class StableDiffusion1Model(DiffusersModel): def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType): assert base_model == BaseModelType.StableDiffusion1 - assert model_type == ModelType.Pipeline + assert model_type == ModelType.Main super().__init__( model_path=model_path, base_model=BaseModelType.StableDiffusion1, - model_type=ModelType.Pipeline, + model_type=ModelType.Main, ) @classmethod @@ 
-140,11 +140,11 @@ class StableDiffusion2Model(DiffusersModel): def __init__(self, model_path: str, base_model: BaseModelType, model_type: ModelType): assert base_model == BaseModelType.StableDiffusion2 - assert model_type == ModelType.Pipeline + assert model_type == ModelType.Main super().__init__( model_path=model_path, base_model=BaseModelType.StableDiffusion2, - model_type=ModelType.Pipeline, + model_type=ModelType.Main, ) @classmethod diff --git a/invokeai/configs/INITIAL_MODELS.yaml b/invokeai/configs/INITIAL_MODELS.yaml index 9ff9efc3f5..4ba67bc4bc 100644 --- a/invokeai/configs/INITIAL_MODELS.yaml +++ b/invokeai/configs/INITIAL_MODELS.yaml @@ -1,58 +1,58 @@ # This file predefines a few models that the user may want to install. -sd-1/pipeline/stable-diffusion-v1-5: +sd-1/main/stable-diffusion-v1-5: description: Stable Diffusion version 1.5 diffusers model (4.27 GB) repo_id: runwayml/stable-diffusion-v1-5 recommended: True default: True -sd-1/pipeline/stable-diffusion-inpainting: +sd-1/main/stable-diffusion-inpainting: description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB) repo_id: runwayml/stable-diffusion-inpainting recommended: True -sd-2/pipeline/stable-diffusion-2-1: +sd-2/main/stable-diffusion-2-1: description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB) repo_id: stabilityai/stable-diffusion-2-1 recommended: True -sd-2/pipeline/stable-diffusion-2-inpainting: +sd-2/main/stable-diffusion-2-inpainting: description: Stable Diffusion version 2.0 inpainting model (5.21 GB) repo_id: stabilityai/stable-diffusion-2-inpainting recommended: False -sd-1/pipeline/Analog-Diffusion: +sd-1/main/Analog-Diffusion: description: An SD-1.5 model trained on diverse analog photographs (2.13 GB) repo_id: wavymulder/Analog-Diffusion recommended: false -sd-1/pipeline/Deliberate: +sd-1/main/Deliberate: description: Versatile model that produces detailed images up to 768px (4.27 GB) repo_id: XpucT/Deliberate recommended: False -sd-1/pipeline/Dungeons-and-Diffusion: +sd-1/main/Dungeons-and-Diffusion: description: Dungeons & Dragons characters (2.13 GB) repo_id: 0xJustin/Dungeons-and-Diffusion recommended: False -sd-1/pipeline/dreamlike-photoreal-2: +sd-1/main/dreamlike-photoreal-2: description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB) repo_id: dreamlike-art/dreamlike-photoreal-2.0 recommended: False -sd-1/pipeline/Inkpunk-Diffusion: +sd-1/main/Inkpunk-Diffusion: description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB) repo_id: Envvi/Inkpunk-Diffusion recommended: False -sd-1/pipeline/openjourney: +sd-1/main/openjourney: description: An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB) repo_id: prompthero/openjourney recommended: False -sd-1/pipeline/portraitplus: +sd-1/main/portraitplus: description: An SD-1.5 model trained on close range portraits of people; prompt with "portrait+" (2.13 GB) repo_id: wavymulder/portraitplus recommended: False -sd-1/pipeline/seek.art_MEGA: +sd-1/main/seek.art_MEGA: repo_id: coreco/seek.art_MEGA description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB) recommended: False -sd-1/pipeline/trinart_stable_diffusion_v2: +sd-1/main/trinart_stable_diffusion_v2: description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB) repo_id: naclbit/trinart_stable_diffusion_v2 recommended: False -sd-1/pipeline/waifu-diffusion: 
+sd-1/main/waifu-diffusion: description: An SD-1.5 model trained on 680k anime/manga-style images (2.13 GB) repo_id: hakurei/waifu-diffusion recommended: False diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index 206cdcacdb..5dd72d4776 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -127,7 +127,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): self.nextrely = top_of_table self.pipeline_models = self.add_pipeline_widgets( - model_type=ModelType.Pipeline, + model_type=ModelType.Main, window_width=window_width, exclude = self.starter_models ) @@ -303,7 +303,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): ### Tab for arbitrary diffusers widgets ### def add_pipeline_widgets(self, - model_type: ModelType=ModelType.Pipeline, + model_type: ModelType=ModelType.Main, window_width: int=120, **kwargs, )->dict[str,npyscreen.widget]: From c3c4a711734084fdb37bec3ce2984b5e8a6364de Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sat, 24 Jun 2023 12:37:26 -0400 Subject: [PATCH 19/47] implemented Stalker's suggested improvements --- invokeai/backend/install/migrate_to_3.py | 2 +- .../backend/model_management/model_probe.py | 51 +++++++++++-------- .../backend/model_management/models/base.py | 12 +++-- 3 files changed, 40 insertions(+), 25 deletions(-) diff --git a/invokeai/backend/install/migrate_to_3.py b/invokeai/backend/install/migrate_to_3.py index 77a476e5c5..713f9c5a83 100644 --- a/invokeai/backend/install/migrate_to_3.py +++ b/invokeai/backend/install/migrate_to_3.py @@ -478,7 +478,7 @@ def _parse_legacy_initfile(root: Path, initfile: Path)->ModelPaths: models = root / 'models', embeddings = root / str(opt.embedding_path).strip('"'), loras = root / str(opt.lora_path).strip('"'), - controlnets = None + controlnets = root / 'controlnets', ) def _parse_legacy_yamlfile(root: Path, initfile: Path)->ModelPaths: diff --git a/invokeai/backend/model_management/model_probe.py b/invokeai/backend/model_management/model_probe.py index 17c76250b9..2b6eb7e7be 100644 --- a/invokeai/backend/model_management/model_probe.py +++ b/invokeai/backend/model_management/model_probe.py @@ -9,7 +9,11 @@ from pathlib import Path from typing import Callable, Literal, Union, Dict from picklescan.scanner import scan_file_path -from .models import BaseModelType, ModelType, ModelVariantType, SchedulerPredictionType, SilenceWarnings +from .models import ( + BaseModelType, ModelType, ModelVariantType, + SchedulerPredictionType, SilenceWarnings, +) +from .models.base import read_checkpoint_meta @dataclass class ModelProbeInfo(object): @@ -105,29 +109,34 @@ class ModelProbe(object): return model_info @classmethod - def get_model_type_from_checkpoint(cls, model_path: Path, checkpoint: dict)->ModelType: - if model_path.suffix not in ('.bin','.pt','.ckpt','.safetensors'): + def get_model_type_from_checkpoint(cls, model_path: Path, checkpoint: dict) -> ModelType: + if model_path.suffix not in ('.bin','.pt','.ckpt','.safetensors','.pth'): return None - if model_path.name=='learned_embeds.bin': + + if model_path.name == "learned_embeds.bin": return ModelType.TextualInversion - checkpoint = checkpoint or cls._scan_and_load_checkpoint(model_path) - state_dict = checkpoint.get("state_dict") or checkpoint + + checkpoint = checkpoint or read_checkpoint_meta(model_path, scan=True) + checkpoint = checkpoint.get("state_dict", checkpoint) + + for key in checkpoint.keys(): + if any(key.startswith(v) for v in 
{"cond_stage_model.", "first_stage_model.", "model.diffusion_model."}): + return ModelType.Main + elif any(key.startswith(v) for v in {"encoder.conv_in", "decoder.conv_in"}): + return ModelType.Vae + elif any(key.startswith(v) for v in {"lora_te_", "lora_unet_"}): + return ModelType.Lora + elif any(key.startswith(v) for v in {"control_model", "input_blocks"}): + return ModelType.ControlNet + elif key in {"emb_params", "string_to_param"}: + return ModelType.TextualInversion + + else: + # diffusers-ti + if len(checkpoint) < 10 and all(isinstance(v, torch.Tensor) for v in checkpoint.values()): + return ModelType.TextualInversion - if len(checkpoint) < 10 and all(isinstance(v, torch.Tensor) for v in checkpoint.values()): - return ModelType.TextualInversion - if any([x.startswith("model.diffusion_model") for x in state_dict.keys()]): - return ModelType.Main - if any([x.startswith("encoder.conv_in") for x in state_dict.keys()]): - return ModelType.Vae - if "string_to_token" in state_dict or "emb_params" in state_dict: - return ModelType.TextualInversion - if any([x.startswith("lora") for x in state_dict.keys()]): - return ModelType.Lora - if any([x.startswith("control_model") for x in state_dict.keys()]): - return ModelType.ControlNet - if any([x.startswith("input_blocks") for x in state_dict.keys()]): - return ModelType.ControlNet - return None # give up + raise ValueError("Unable to determine model type") @classmethod def get_model_type_from_folder(cls, folder_path: Path, model: ModelMixin)->ModelType: diff --git a/invokeai/backend/model_management/models/base.py b/invokeai/backend/model_management/models/base.py index beaac40109..582ed233b7 100644 --- a/invokeai/backend/model_management/models/base.py +++ b/invokeai/backend/model_management/models/base.py @@ -1,9 +1,12 @@ +import json import os import sys import typing import inspect from enum import Enum from abc import ABCMeta, abstractmethod +from pathlib import Path +from picklescan.scanner import scan_file_path import torch import safetensors.torch from diffusers import DiffusionPipeline, ConfigMixin @@ -382,15 +385,18 @@ def _fast_safetensors_reader(path: str): return checkpoint - -def read_checkpoint_meta(path: str): - if path.endswith(".safetensors"): +def read_checkpoint_meta(path: Union[str, Path], scan: bool = False): + if str(path).endswith(".safetensors"): try: checkpoint = _fast_safetensors_reader(path) except: # TODO: create issue for support "meta"? checkpoint = safetensors.torch.load_file(path, device="cpu") else: + if scan: + scan_result = scan_file_path(checkpoint) + if scan_result.infected_files != 0: + raise Exception(f"The model file \"{path}\" is potentially infected by malware. 
Aborting import.") checkpoint = torch.load(path, map_location=torch.device("meta")) return checkpoint From a3c22b5fe6864a2247e42e8f52770ad65e35ce82 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Sun, 25 Jun 2023 21:06:22 +0300 Subject: [PATCH 20/47] Remove upcast_attention and prediction_type from stable diffusion model logic, fix ckpt conversion according to this --- .../convert_ckpt_to_diffusers.py | 47 ++++++++-------- .../models/stable_diffusion.py | 56 +++++-------------- 2 files changed, 35 insertions(+), 68 deletions(-) diff --git a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py index 5d097f5a4e..1eeee92fb7 100644 --- a/invokeai/backend/model_management/convert_ckpt_to_diffusers.py +++ b/invokeai/backend/model_management/convert_ckpt_to_diffusers.py @@ -30,7 +30,7 @@ from invokeai.app.services.config import InvokeAIAppConfig from .model_manager import ModelManager from .model_cache import ModelCache -from .models import SchedulerPredictionType, BaseModelType, ModelVariantType +from .models import BaseModelType, ModelVariantType try: from omegaconf import OmegaConf @@ -73,7 +73,9 @@ from transformers import ( from ..stable_diffusion import StableDiffusionGeneratorPipeline -MODEL_ROOT = None +# TODO: redo in future +#CONVERT_MODEL_ROOT = InvokeAIAppConfig.get_config().models_path / "core" / "convert" +CONVERT_MODEL_ROOT = InvokeAIAppConfig.get_config().root_path / "models" / "core" / "convert" def shave_segments(path, n_shave_prefix_segments=1): """ @@ -605,7 +607,7 @@ def convert_ldm_vae_checkpoint(checkpoint, config): else: vae_state_dict = checkpoint - new_checkpoint = convert_ldm_vae_state_dict(vae_state_dict,config) + new_checkpoint = convert_ldm_vae_state_dict(vae_state_dict, config) return new_checkpoint def convert_ldm_vae_state_dict(vae_state_dict, config): @@ -828,7 +830,7 @@ def convert_ldm_bert_checkpoint(checkpoint, config): def convert_ldm_clip_checkpoint(checkpoint): - text_model = CLIPTextModel.from_pretrained(MODEL_ROOT / 'clip-vit-large-patch14') + text_model = CLIPTextModel.from_pretrained(CONVERT_MODEL_ROOT / 'clip-vit-large-patch14') keys = list(checkpoint.keys()) text_model_dict = {} @@ -882,7 +884,7 @@ textenc_pattern = re.compile("|".join(protected.keys())) def convert_open_clip_checkpoint(checkpoint): text_model = CLIPTextModel.from_pretrained( - MODEL_ROOT / 'stable-diffusion-2-clip', + CONVERT_MODEL_ROOT / 'stable-diffusion-2-clip', subfolder='text_encoder', ) @@ -949,7 +951,7 @@ def convert_open_clip_checkpoint(checkpoint): return text_model -def replace_checkpoint_vae(checkpoint, vae_path:str): +def replace_checkpoint_vae(checkpoint, vae_path: str): if vae_path.endswith(".safetensors"): vae_ckpt = load_file(vae_path) else: @@ -959,7 +961,7 @@ def replace_checkpoint_vae(checkpoint, vae_path:str): new_key = f'first_stage_model.{vae_key}' checkpoint[new_key] = state_dict[vae_key] -def convert_ldm_vae_to_diffusers(checkpoint, vae_config: DictConfig, image_size: int)->AutoencoderKL: +def convert_ldm_vae_to_diffusers(checkpoint, vae_config: DictConfig, image_size: int) -> AutoencoderKL: vae_config = create_vae_diffusers_config( vae_config, image_size=image_size ) @@ -979,8 +981,6 @@ def load_pipeline_from_original_stable_diffusion_ckpt( original_config_file: str, extract_ema: bool = True, precision: torch.dtype = torch.float32, - upcast_attention: bool = False, - prediction_type: SchedulerPredictionType = SchedulerPredictionType.Epsilon, scan_needed: bool = True, ) -> 
StableDiffusionPipeline: """ @@ -994,8 +994,6 @@ def load_pipeline_from_original_stable_diffusion_ckpt( :param checkpoint_path: Path to `.ckpt` file. :param original_config_file: Path to `.yaml` config file corresponding to the original architecture. If `None`, will be automatically inferred by looking for a key that only exists in SD2.0 models. - :param prediction_type: The prediction type that the model was trained on. Use `'epsilon'` for Stable Diffusion - v1.X and Stable Diffusion v2 Base. Use `'v-prediction'` for Stable Diffusion v2. :param scheduler_type: Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm", "ddim"]`. :param model_type: The pipeline type. `None` to automatically infer, or one of `["FrozenOpenCLIPEmbedder", "FrozenCLIPEmbedder"]`. :param extract_ema: Only relevant for @@ -1003,17 +1001,16 @@ def load_pipeline_from_original_stable_diffusion_ckpt( or not. Defaults to `False`. Pass `True` to extract the EMA weights. EMA weights usually yield higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning. :param precision: precision to use - torch.float16, torch.float32 or torch.autocast - :param upcast_attention: Whether the attention computation should always be upcasted. This is necessary when - running stable diffusion 2.1. """ - config = InvokeAIAppConfig.get_config() + if not isinstance(checkpoint_path, Path): + checkpoint_path = Path(checkpoint_path) with warnings.catch_warnings(): warnings.simplefilter("ignore") verbosity = dlogging.get_verbosity() dlogging.set_verbosity_error() - if str(checkpoint_path).endswith(".safetensors"): + if checkpoint_path.suffix == ".safetensors": checkpoint = load_file(checkpoint_path) else: if scan_needed: @@ -1026,9 +1023,13 @@ def load_pipeline_from_original_stable_diffusion_ckpt( original_config = OmegaConf.load(original_config_file) - if model_version == BaseModelType.StableDiffusion2 and prediction_type == SchedulerPredictionType.VPrediction: + if model_version == BaseModelType.StableDiffusion2 and original_config["model"]["params"]["parameterization"] == "v": + prediction_type = "v_prediction" + upcast_attention = True image_size = 768 else: + prediction_type = "epsilon" + upcast_attention = False image_size = 512 # @@ -1083,7 +1084,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt( if model_type == "FrozenOpenCLIPEmbedder": text_model = convert_open_clip_checkpoint(checkpoint) tokenizer = CLIPTokenizer.from_pretrained( - MODEL_ROOT / 'stable-diffusion-2-clip', + CONVERT_MODEL_ROOT / 'stable-diffusion-2-clip', subfolder='tokenizer', ) pipe = StableDiffusionPipeline( @@ -1099,9 +1100,9 @@ def load_pipeline_from_original_stable_diffusion_ckpt( elif model_type in ["FrozenCLIPEmbedder", "WeightedFrozenCLIPEmbedder"]: text_model = convert_ldm_clip_checkpoint(checkpoint) - tokenizer = CLIPTokenizer.from_pretrained(MODEL_ROOT / 'clip-vit-large-patch14') - safety_checker = StableDiffusionSafetyChecker.from_pretrained(MODEL_ROOT / 'stable-diffusion-safety-checker') - feature_extractor = AutoFeatureExtractor.from_pretrained(MODEL_ROOT / 'stable-diffusion-safety-checker') + tokenizer = CLIPTokenizer.from_pretrained(CONVERT_MODEL_ROOT / 'clip-vit-large-patch14') + safety_checker = StableDiffusionSafetyChecker.from_pretrained(CONVERT_MODEL_ROOT / 'stable-diffusion-safety-checker') + feature_extractor = AutoFeatureExtractor.from_pretrained(CONVERT_MODEL_ROOT / 'stable-diffusion-safety-checker') pipe = StableDiffusionPipeline( vae=vae.to(precision), 
text_encoder=text_model.to(precision), @@ -1115,7 +1116,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt( else: text_config = create_ldm_bert_config(original_config) text_model = convert_ldm_bert_checkpoint(checkpoint, text_config) - tokenizer = BertTokenizerFast.from_pretrained(MODEL_ROOT / "bert-base-uncased") + tokenizer = BertTokenizerFast.from_pretrained(CONVERT_MODEL_ROOT / "bert-base-uncased") pipe = LDMTextToImagePipeline( vqvae=vae, bert=text_model, @@ -1131,7 +1132,6 @@ def load_pipeline_from_original_stable_diffusion_ckpt( def convert_ckpt_to_diffusers( checkpoint_path: Union[str, Path], dump_path: Union[str, Path], - model_root: Union[str, Path], **kwargs, ): """ @@ -1139,9 +1139,6 @@ def convert_ckpt_to_diffusers( and in addition a path-like object indicating the location of the desired diffusers model to be written. """ - # setting global here to avoid massive changes late at night - global MODEL_ROOT - MODEL_ROOT = Path(model_root) / 'core/convert' pipe = load_pipeline_from_original_stable_diffusion_ckpt(checkpoint_path, **kwargs) pipe.save_pretrained( diff --git a/invokeai/backend/model_management/models/stable_diffusion.py b/invokeai/backend/model_management/models/stable_diffusion.py index a269ae12e2..f5112dfebb 100644 --- a/invokeai/backend/model_management/models/stable_diffusion.py +++ b/invokeai/backend/model_management/models/stable_diffusion.py @@ -34,7 +34,7 @@ class StableDiffusion1Model(DiffusersModel): class CheckpointConfig(ModelConfigBase): model_format: Literal[StableDiffusion1ModelFormat.Checkpoint] vae: Optional[str] = Field(None) - config: Optional[str] = Field(None) + config: str variant: ModelVariantType @@ -81,6 +81,8 @@ class StableDiffusion1Model(DiffusersModel): else: raise Exception("Unkown stable diffusion 1.* model format") + if ckpt_config_path is None: + ckpt_config_path = _select_ckpt_config(BaseModelType.StableDiffusion1, variant) return cls.create_config( path=path, @@ -116,7 +118,7 @@ class StableDiffusion1Model(DiffusersModel): version=BaseModelType.StableDiffusion1, model_config=config, output_path=output_path, - ) # TODO: args + ) else: return model_path @@ -183,13 +185,8 @@ class StableDiffusion2Model(DiffusersModel): else: raise Exception("Unkown stable diffusion 2.* model format") - if variant == ModelVariantType.Normal: - prediction_type = SchedulerPredictionType.VPrediction - upcast_attention = True - - else: - prediction_type = SchedulerPredictionType.Epsilon - upcast_attention = False + if ckpt_config_path is None: + ckpt_config_path = _select_ckpt_config(BaseModelType.StableDiffusion2, variant) return cls.create_config( path=path, @@ -197,8 +194,6 @@ class StableDiffusion2Model(DiffusersModel): config=ckpt_config_path, variant=variant, - prediction_type=prediction_type, - upcast_attention=upcast_attention, ) @classproperty @@ -227,7 +222,7 @@ class StableDiffusion2Model(DiffusersModel): version=BaseModelType.StableDiffusion2, model_config=config, output_path=output_path, - ) # TODO: args + ) else: return model_path @@ -238,18 +233,18 @@ def _select_ckpt_config(version: BaseModelType, variant: ModelVariantType): ModelVariantType.Inpaint: "v1-inpainting-inference.yaml", }, BaseModelType.StableDiffusion2: { - # code further will manually set upcast_attention and v_prediction - ModelVariantType.Normal: "v2-inference.yaml", + ModelVariantType.Normal: "v2-inference-v.yaml", # best guess, as we can't differentiate with base(512) ModelVariantType.Inpaint: "v2-inpainting-inference.yaml", ModelVariantType.Depth: 
"v2-midas-inference.yaml", } } + app_config = InvokeAIAppConfig.get_config() try: - # TODO: path - #model_config.config = app_config.config_dir / "stable-diffusion" / ckpt_configs[version][model_config.variant] - #return InvokeAIAppConfig.get_config().legacy_conf_dir / ckpt_configs[version][variant] - return InvokeAIAppConfig.get_config().root_dir / "configs" / "stable-diffusion" / ckpt_configs[version][variant] + config_path = app_config.legacy_conf_path / ckpt_configs[version][variant] + if config_path.is_relative_to(app_config.root_path): + config_path = config_path.relative_to(app_config.root_path) + return str(config_path) except: return None @@ -268,36 +263,14 @@ def _convert_ckpt_and_cache( """ app_config = InvokeAIAppConfig.get_config() - if model_config.config is None: - model_config.config = _select_ckpt_config(version, model_config.variant) - if model_config.config is None: - raise Exception(f"Model variant {model_config.variant} not supported for {version}") - - weights = app_config.root_dir / model_config.path config_file = app_config.root_dir / model_config.config output_path = Path(output_path) - if version == BaseModelType.StableDiffusion1: - upcast_attention = False - prediction_type = SchedulerPredictionType.Epsilon - - elif version == BaseModelType.StableDiffusion2: - upcast_attention = model_config.upcast_attention - prediction_type = model_config.prediction_type - - else: - raise Exception(f"Unknown model provided: {version}") - - # return cached version if it exists if output_path.exists(): return output_path - # TODO: I think that it more correctly to convert with embedded vae - # as if user will delete custom vae he will got not embedded but also custom vae - #vae_ckpt_path, vae_model = self._get_vae_for_conversion(weights, mconfig) - # to avoid circular import errors from ..convert_ckpt_to_diffusers import convert_ckpt_to_diffusers with SilenceWarnings(): @@ -308,9 +281,6 @@ def _convert_ckpt_and_cache( model_variant=model_config.variant, original_config_file=config_file, extract_ema=True, - upcast_attention=upcast_attention, - prediction_type=prediction_type, scan_needed=True, - model_root=app_config.models_path, ) return output_path From 10c3753d7f9ed7cf2415091160f08ee45e30a681 Mon Sep 17 00:00:00 2001 From: user1 Date: Sun, 25 Jun 2023 11:16:39 -0700 Subject: [PATCH 21/47] Added SAM preprocessor --- .../app/invocations/controlnet_image_processors.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index baf558ac24..0fdfc12905 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -29,6 +29,7 @@ from controlnet_aux import ( ContentShuffleDetector, ZoeDetector, MediapipeFaceDetector, + SamDetector, ) from .image import ImageOutput, PILInvocationConfig @@ -455,3 +456,15 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation, PILInvocationCo mediapipe_face_processor = MediapipeFaceDetector() processed_image = mediapipe_face_processor(image, max_faces=self.max_faces, min_confidence=self.min_confidence) return processed_image + + +class SegmentAnythingProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig): + """Applies segment anything processing to image""" + # fmt: off + type: Literal["segment_anything_processor"] = "segment_anything_processor" + # fmt: on + + def run_processor(self, image): + segment_anything_processor = 
SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints") + processed_image = segment_anything_processor(image) + return processed_image From de4064bdac1d80ec3aac496b3727854094a42d8f Mon Sep 17 00:00:00 2001 From: user1 Date: Sun, 25 Jun 2023 12:38:17 -0700 Subject: [PATCH 22/47] Fixed problem with with non-reproducible results from ControlNet SegmentAnything preprocessor. Cause was controlnet_aux randomization of segmentation coloring, which seems to lead to some randomization of resulting images using ControlNet seg model. Switched to using deterministic ADE20K color palette instead, which solved the problem. --- .../controlnet_image_processors.py | 36 +++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index 0fdfc12905..c5777284a5 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -4,7 +4,7 @@ from builtins import float, bool import numpy as np -from typing import Literal, Optional, Union, List +from typing import Literal, Optional, Union, List, Dict from PIL import Image, ImageFilter, ImageOps from pydantic import BaseModel, Field, validator @@ -32,6 +32,9 @@ from controlnet_aux import ( SamDetector, ) +from controlnet_aux.util import ade_palette + + from .image import ImageOutput, PILInvocationConfig CONTROLNET_DEFAULT_MODELS = [ @@ -465,6 +468,35 @@ class SegmentAnythingProcessorInvocation(ImageProcessorInvocation, PILInvocation # fmt: on def run_processor(self, image): - segment_anything_processor = SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints") + # segment_anything_processor = SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints") + segment_anything_processor = SamDetectorReproducibleColors.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints") processed_image = segment_anything_processor(image) return processed_image + +class SamDetectorReproducibleColors(SamDetector): + + # overriding SamDetector.show_anns() method to use reproducible colors for segmentation image + # base class show_anns() method randomizes colors, + # which seems to also lead to non-reproducible image generation + # so using ADE20k color palette instead + def show_anns(self, anns: List[Dict]): + if len(anns) == 0: + return + sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True) + h, w = anns[0]['segmentation'].shape + final_img = Image.fromarray(np.zeros((h, w, 3), dtype=np.uint8), mode="RGB") + print("number of annotations: ", len(sorted_anns)) + print("type of annotations: ", type(sorted_anns)) + palette = ade_palette() + for i, ann in enumerate(sorted_anns): + m = ann['segmentation'] + img = np.empty((m.shape[0], m.shape[1], 3), dtype=np.uint8) + # doing modulo just in case number of annotated regions exceeds number of colors in palette + ann_color = palette[i % len(palette)] + print(ann_color) + img[:, :, 0] = ann_color[0] + img[:, :, 1] = ann_color[1] + img[:, :, 2] = ann_color[2] + final_img.paste(Image.fromarray(img, mode="RGB"), (0, 0), Image.fromarray(np.uint8(m * 255))) + + return np.array(final_img, dtype=np.uint8) From b872e7a5e0ec7c1cb1c7ef5ca31065bce9cd74af Mon Sep 17 00:00:00 2001 From: user1 Date: Sun, 25 Jun 2023 12:54:48 -0700 Subject: [PATCH 23/47] Simplifying ControlNet SAM preprocessor segmentation color mapping. 
--- invokeai/app/invocations/controlnet_image_processors.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index c5777284a5..d7825111d3 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -485,18 +485,12 @@ class SamDetectorReproducibleColors(SamDetector): sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True) h, w = anns[0]['segmentation'].shape final_img = Image.fromarray(np.zeros((h, w, 3), dtype=np.uint8), mode="RGB") - print("number of annotations: ", len(sorted_anns)) - print("type of annotations: ", type(sorted_anns)) palette = ade_palette() for i, ann in enumerate(sorted_anns): m = ann['segmentation'] img = np.empty((m.shape[0], m.shape[1], 3), dtype=np.uint8) # doing modulo just in case number of annotated regions exceeds number of colors in palette ann_color = palette[i % len(palette)] - print(ann_color) - img[:, :, 0] = ann_color[0] - img[:, :, 1] = ann_color[1] - img[:, :, 2] = ann_color[2] + img[:, :] = ann_color final_img.paste(Image.fromarray(img, mode="RGB"), (0, 0), Image.fromarray(np.uint8(m * 255))) - return np.array(final_img, dtype=np.uint8) From 60b37b7ff49ee28122c28af3e39be0a168101cb3 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 25 Jun 2023 16:04:43 -0400 Subject: [PATCH 24/47] fix model manager documentation --- invokeai/app/services/config.py | 3 +- .../backend/install/model_install_backend.py | 10 +- .../backend/model_management/model_manager.py | 396 +++++++++--------- .../backend/model_management/models/base.py | 5 +- invokeai/frontend/install/model_install.py | 4 +- 5 files changed, 217 insertions(+), 201 deletions(-) diff --git a/invokeai/app/services/config.py b/invokeai/app/services/config.py index 014119289f..113f8b4a8e 100644 --- a/invokeai/app/services/config.py +++ b/invokeai/app/services/config.py @@ -374,7 +374,8 @@ setting environment variables INVOKEAI_. 
tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category='Memory/Performance') root : Path = Field(default=_find_root(), description='InvokeAI runtime root directory', category='Paths') - autoconvert_dir : Path = Field(default=None, description='Path to a directory of ckpt files to be converted into diffusers and imported on startup.', category='Paths') + autoimport_dir : Path = Field(default='models/autoimport', description='Path to a directory of models files to be imported on startup.', category='Paths') + autoconvert_dir : Path = Field(default=None, description='Deprecated configuration option.', category='Paths') conf_path : Path = Field(default='configs/models.yaml', description='Path to models definition file', category='Paths') models_dir : Path = Field(default='./models', description='Path to the models directory', category='Paths') legacy_conf_dir : Path = Field(default='configs/stable-diffusion', description='Path to directory of legacy checkpoint config files', category='Paths') diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index 1e50c0c9b6..13283f023b 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -179,9 +179,9 @@ class ModelInstall(object): self.mgr.commit() if selections.autoscan_on_startup and Path(selections.scan_directory).is_dir(): - update_autoconvert_dir(selections.scan_directory) + update_autoimport_dir(selections.scan_directory) else: - update_autoconvert_dir(None) + update_autoimport_dir(None) def heuristic_install(self, model_path_id_or_url: Union[str,Path]): # A little hack to allow nested routines to retrieve info on the requested ID @@ -375,13 +375,13 @@ class ModelInstall(object): ''' return {v.get('path') or v.get('repo_id') : k for k, v in datasets.items()} -def update_autoconvert_dir(autodir: Path): +def update_autoimport_dir(autodir: Path): ''' - Update the "autoconvert_dir" option in invokeai.yaml + Update the "autoimport_dir" option in invokeai.yaml ''' invokeai_config_path = config.init_file_path conf = OmegaConf.load(invokeai_config_path) - conf.InvokeAI.Paths.autoconvert_dir = str(autodir) if autodir else None + conf.InvokeAI.Paths.autoimport_dir = str(autodir) if autodir else None yaml = OmegaConf.to_yaml(conf) tmpfile = invokeai_config_path.parent / "new_config.tmp" with open(tmpfile, "w", encoding="utf-8") as outfile: diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index eafc283b1d..fb5c0f9a9c 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -1,53 +1,193 @@ """This module manages the InvokeAI `models.yaml` file, mapping -symbolic diffusers model names to the paths and repo_ids used -by the underlying `from_pretrained()` call. +symbolic diffusers model names to the paths and repo_ids used by the +underlying `from_pretrained()` call. -For fetching models, use manager.get_model('symbolic name'). This will -return a ModelInfo object that contains the following attributes: - - * context -- a context manager Generator that loads and locks the - model into GPU VRAM and returns the model for use. - See below for usage. 
- * name -- symbolic name of the model - * type -- SubModelType of the model - * hash -- unique hash for the model - * location -- path or repo_id of the model - * revision -- revision of the model if coming from a repo id, - e.g. 'fp16' - * precision -- torch precision of the model +SYNOPSIS: -Typical usage: + mgr = ModelManager('/home/phi/invokeai/configs/models.yaml') + sd1_5 = mgr.get_model('stable-diffusion-v1-5', + model_type=ModelType.Main, + base_model=BaseModelType.StableDiffusion1, + submodel_type=SubModelType.Unet) + with sd1_5 as unet: + run_some_inference(unet) - from invokeai.backend import ModelManager +FETCHING MODELS: - manager = ModelManager( - config='./configs/models.yaml', - max_cache_size=8 - ) # gigabytes +Models are described using four attributes: - model_info = manager.get_model('stable-diffusion-1.5', SubModelType.Diffusers) - with model_info.context as my_model: - my_model.latents_from_embeddings(...) + 1) model_name -- the symbolic name for the model -The manager uses the underlying ModelCache class to keep -frequently-used models in RAM and move them into GPU as needed for -generation operations. The optional `max_cache_size` argument -indicates the maximum size the cache can grow to, in gigabytes. The -underlying ModelCache object can be accessed using the manager's "cache" -attribute. + 2) ModelType -- an enum describing the type of the model. Currently + defined types are: + ModelType.Main -- a full model capable of generating images + ModelType.Vae -- a VAE model + ModelType.Lora -- a LoRA or LyCORIS fine-tune + ModelType.TextualInversion -- a textual inversion embedding + ModelType.ControlNet -- a ControlNet model -Because the model manager can return multiple different types of -models, you may wish to add additional type checking on the class -of model returned. To do this, provide the option `model_type` -parameter: + 3) BaseModelType -- an enum indicating the stable diffusion base model, one of: + BaseModelType.StableDiffusion1 + BaseModelType.StableDiffusion2 - model_info = manager.get_model( - 'clip-tokenizer', - model_type=SubModelType.Tokenizer - ) + 4) SubModelType (optional) -- an enum that refers to one of the submodels contained + within the main model. Values are: -This will raise an InvalidModelError if the format defined in the -config file doesn't match the requested model type. + SubModelType.UNet + SubModelType.TextEncoder + SubModelType.Tokenizer + SubModelType.Scheduler + SubModelType.SafetyChecker + +To fetch a model, use `manager.get_model()`. This takes the symbolic +name of the model, the ModelType, the BaseModelType and the +SubModelType. The latter is required for ModelType.Main. + +get_model() will return a ModelInfo object that can then be used in +context to retrieve the model and move it into GPU VRAM (on GPU +systems). + +A typical example is: + + sd1_5 = mgr.get_model('stable-diffusion-v1-5', + model_type=ModelType.Main, + base_model=BaseModelType.StableDiffusion1, + submodel_type=SubModelType.Unet) + with sd1_5 as unet: + run_some_inference(unet) + +The ModelInfo object provides a number of useful fields describing the +model, including: + + name -- symbolic name of the model + base_model -- base model (BaseModelType) + type -- model type (ModelType) + location -- path to the model file + precision -- torch precision of the model + hash -- unique sha256 checksum for this model + +SUBMODELS: + +When fetching a main model, you must specify the submodel. Retrieval +of full pipelines is not supported. 
+ + vae_info = mgr.get_model('stable-diffusion-1.5', + model_type = ModelType.Main, + base_model = BaseModelType.StableDiffusion1, + submodel_type = SubModelType.Vae + ) + with vae_info as vae: + do_something(vae) + +This rule does not apply to controlnets, embeddings, loras and standalone +VAEs, which do not have submodels. + +LISTING MODELS + +The model_names() method will return a list of Tuples describing each +model it knows about: + + >> mgr.model_names() + [ + ('stable-diffusion-1.5', , ), + ('stable-diffusion-2.1', , ), + ('inpaint', , ) + ('Ink scenery', , ) + ... + ] + +The tuple is in the correct order to pass to get_model(): + + for m in mgr.model_names(): + info = get_model(*m) + +In contrast, the list_models() method returns a list of dicts, each +providing information about a model defined in models.yaml. For example: + + >>> models = mgr.list_models() + >>> json.dumps(models[0]) + {"path": "/home/lstein/invokeai-main/models/sd-1/controlnet/canny", + "model_format": "diffusers", + "name": "canny", + "base_model": "sd-1", + "type": "controlnet" + } + +You can filter by model type and base model as shown here: + + + controlnets = mgr.list_models(model_type=ModelType.ControlNet, + base_model=BaseModelType.StableDiffusion1) + for c in controlnets: + name = c['name'] + format = c['model_format'] + path = c['path'] + type = c['type'] + # etc + +ADDING AND REMOVING MODELS + +At startup time, the `models` directory will be scanned for +checkpoints, diffusers pipelines, controlnets, LoRAs and TI +embeddings. New entries will be added to the model manager and defunct +ones removed. Anything that is a main model (ModelType.Main) will be +added to models.yaml. For scanning to succeed, files need to be in +their proper places. For example, a controlnet folder built on the +stable diffusion 2 base, will need to be placed in +`models/sd-2/controlnet`. + +Layout of the `models` directory: + + models + ├── sd-1 + │   ├── controlnet + │   ├── lora + │   ├── main + │   └── embedding + ├── sd-2 + │   ├── controlnet + │   ├── lora + │   ├── main + │ └── embedding + └── core + ├── face_reconstruction + │ ├── codeformer + │ └── gfpgan + ├── sd-conversion + │ ├── clip-vit-large-patch14 - tokenizer, text_encoder subdirs + │ ├── stable-diffusion-2 - tokenizer, text_encoder subdirs + │ └── stable-diffusion-safety-checker + └── upscaling + └─── esrgan + + + +class ConfigMeta(BaseModel):Loras, textual_inversion and controlnet models are not listed +explicitly in models.yaml, but are added to the in-memory data +structure at initialization time by scanning the models directory. The +in-memory data structure can be resynchronized by calling +`manager.scan_models_directory()`. + +Files and folders placed inside the `autoimport_dir` (path defined in +`invokeai.yaml`, defaulting to `ROOTDIR/autoimport` will also be +scanned for new models at initialization time and added to +`models.yaml`. Files will not be moved from this location but +preserved in-place. + +A model can be manually added using `add_model()` using the model's +name, base model, type and a dict of model attributes. See +`invokeai/backend/model_management/models` for the attributes required +by each model type. + +A model can be deleted using `del_model()`, providing the same +identifying information as `get_model()` + +The `heuristic_import()` method will take a set of strings +corresponding to local paths, remote URLs, and repo_ids, probe the +object to determine what type of model it is (if any), and import new +models into the manager. 
If passed a directory, it will recursively +scan it for models to import. The return value is a set of the models +successfully added. MODELS.YAML @@ -56,94 +196,18 @@ The general format of a models.yaml section is: type-of-model/name-of-model: path: /path/to/local/file/or/directory description: a description - format: folder|ckpt|safetensors|pt - base: SD-1|SD-2 - subfolder: subfolder-name + format: diffusers|checkpoint + variant: normal|inpaint|depth The type of model is given in the stanza key, and is one of -{diffusers, ckpt, vae, text_encoder, tokenizer, unet, scheduler, -safety_checker, feature_extractor, lora, textual_inversion, -controlnet}, and correspond to items in the SubModelType enum defined -in model_cache.py +{main, vae, lora, controlnet, textual} -The format indicates whether the model is organized as a folder with -model subdirectories, or is contained in a single checkpoint or -safetensors file. - -One, but not both, of repo_id and path are provided. repo_id is the -HuggingFace repository ID of the model, and path points to the file or -directory on disk. - -If subfolder is provided, then the model exists in a subdirectory of -the main model. These are usually named after the model type, such as -"unet". - -This example summarizes the two ways of getting a non-diffuser model: - - text_encoder/clip-test-1: - format: folder - path: /path/to/folder - description: Returns standalone CLIPTextModel - - text_encoder/clip-test-2: - format: folder - repo_id: /path/to/folder - subfolder: text_encoder - description: Returns the text_encoder in the subfolder of the diffusers model (just the encoder in RAM) - -SUBMODELS: - -It is also possible to fetch an isolated submodel from a diffusers -model. Use the `submodel` parameter to select which part: - - vae = manager.get_model('stable-diffusion-1.5',submodel=SubModelType.Vae) - with vae.context as my_vae: - print(type(my_vae)) - # "AutoencoderKL" - -DIRECTORY_SCANNING: - -Loras, textual_inversion and controlnet models are usually not listed -explicitly in models.yaml, but are added to the in-memory data -structure at initialization time by scanning the models directory. The -in-memory data structure can be resynchronized by calling -`manager.scan_models_directory`. - -DISAMBIGUATION: - -You may wish to use the same name for a related family of models. To -do this, disambiguate the stanza key with the model and and format -separated by "/". Example: - - tokenizer/clip-large: - format: tokenizer - path: /path/to/folder - description: Returns standalone tokenizer - - text_encoder/clip-large: - format: text_encoder - path: /path/to/folder - description: Returns standalone text encoder - -You can now use the `model_type` argument to indicate which model you -want: - - tokenizer = mgr.get('clip-large',model_type=SubModelType.Tokenizer) - encoder = mgr.get('clip-large',model_type=SubModelType.TextEncoder) - -OTHER FUNCTIONS: - -Other methods provided by ModelManager support importing, editing, -converting and deleting models. - -IMPORTANT CHANGES AND LIMITATIONS SINCE 2.3: - -1. Only local paths are supported. Repo_ids are no longer accepted. This -simplifies the logic. - -2. VAEs can't be swapped in and out at load time. They must be baked -into the model when downloaded or converted. +The format indicates whether the model is organized as a diffusers +folder with model subdirectories, or is contained in a single +checkpoint or safetensors file. +The path points to a file or directory on disk. 
If a relative path, +the root is the InvokeAI ROOTDIR. """ from __future__ import annotations @@ -185,7 +249,6 @@ class ModelInfo(): hash: str location: Union[Path, str] precision: torch.dtype - revision: str = None _cache: ModelCache = None def __enter__(self): @@ -201,31 +264,6 @@ class InvalidModelError(Exception): MAX_CACHE_SIZE = 6.0 # GB -# layout of the models directory: -# models -# ├── sd-1 -# │   ├── controlnet -# │   ├── lora -# │   ├── pipeline -# │   └── textual_inversion -# ├── sd-2 -# │   ├── controlnet -# │   ├── lora -# │   ├── pipeline -# │ └── textual_inversion -# └── core -# ├── face_reconstruction -# │ ├── codeformer -# │ └── gfpgan -# ├── sd-conversion -# │ ├── clip-vit-large-patch14 - tokenizer, text_encoder subdirs -# │ ├── stable-diffusion-2 - tokenizer, text_encoder subdirs -# │ └── stable-diffusion-safety-checker -# └── upscaling -# └─── esrgan - - - class ConfigMeta(BaseModel): version: str @@ -330,44 +368,14 @@ class ModelManager(object): base_model: BaseModelType, model_type: ModelType, submodel_type: Optional[SubModelType] = None - ): + )->ModelInfo: """Given a model named identified in models.yaml, return an ModelInfo object describing it. :param model_name: symbolic name of the model in models.yaml :param model_type: ModelType enum indicating the type of model to return + :param base_model: BaseModelType enum indicating the base model used by this model :param submode_typel: an ModelType enum indicating the portion of the model to retrieve (e.g. ModelType.Vae) - - If not provided, the model_type will be read from the `format` field - of the corresponding stanza. If provided, the model_type will be used - to disambiguate stanzas in the configuration file. The default is to - assume a diffusers pipeline. The behavior is illustrated here: - - [models.yaml] - diffusers/test1: - repo_id: foo/bar - description: Typical diffusers pipeline - - lora/test1: - repo_id: /tmp/loras/test1.safetensors - description: Typical lora file - - test1_pipeline = mgr.get_model('test1') - # returns a StableDiffusionGeneratorPipeline - - test1_vae1 = mgr.get_model('test1', submodel=ModelType.Vae) - # returns the VAE part of a diffusers model as an AutoencoderKL - - test1_vae2 = mgr.get_model('test1', model_type=ModelType.Diffusers, submodel=ModelType.Vae) - # does the same thing as the previous statement. Note that model_type - # is for the parent model, and submodel is for the part - - test1_lora = mgr.get_model('test1', model_type=ModelType.Lora) - # returns a LoRA embed (as a 'dict' of tensors) - - test1_encoder = mgr.get_modelI('test1', model_type=ModelType.TextEncoder) - # raises an InvalidModelError - """ model_class = MODEL_CLASSES[base_model][model_type] model_key = self.create_key(model_name, base_model, model_type) @@ -511,7 +519,7 @@ class ModelManager(object): def print_models(self) -> None: """ - Print a table of models, their descriptions + Print a table of models and their descriptions. This needs to be redone """ # TODO: redo for model_type, model_dict in self.list_models().items(): @@ -552,7 +560,7 @@ class ModelManager(object): else: model_path.unlink() - # TODO: test when ui implemented + # LS: tested def add_model( self, model_name: str, @@ -694,11 +702,18 @@ class ModelManager(object): items_to_import: Set[str], prediction_type_helper: Callable[[Path],SchedulerPredictionType]=None, )->Set[str]: - ''' - Import a list of paths, repo_ids or URLs. Returns the - set of successfully imported items. 
The prediction_type_helper - is a callback that receives the Path of a checkpoint or diffusers - model and returns a SchedulerPredictionType (or None). + '''Import a list of paths, repo_ids or URLs. Returns the set of + successfully imported items. + :param items_to_import: Set of strings corresponding to models to be imported. + :param prediction_type_helper: A callback that receives the Path of a Stable Diffusion 2 checkpoint model and returns a SchedulerPredictionType. + + The prediction type helper is necessary to distinguish between + models based on Stable Diffusion 2 Base (requiring + SchedulerPredictionType.Epsilson) and Stable Diffusion 768 + (requiring SchedulerPredictionType.VPrediction). It is + generally impossible to do this programmatically, so the + prediction_type_helper usually asks the user to choose. + ''' # avoid circular import here from invokeai.backend.install.model_install_backend import ModelInstall @@ -716,6 +731,3 @@ class ModelManager(object): self.commit() return successfully_installed - - - diff --git a/invokeai/backend/model_management/models/base.py b/invokeai/backend/model_management/models/base.py index 582ed233b7..5a03f10212 100644 --- a/invokeai/backend/model_management/models/base.py +++ b/invokeai/backend/model_management/models/base.py @@ -126,7 +126,10 @@ class ModelBase(metaclass=ABCMeta): if not isinstance(value, type) or not issubclass(value, ModelConfigBase): continue - fields = inspect.get_annotations(value) + if hasattr(inspect,'get_annotations'): + fields = inspect.get_annotations(value) + else: + fields = value.__annotations__ try: field = fields["model_format"] except: diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index 5dd72d4776..a8d7f53940 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -323,7 +323,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): FileBox, max_height=3, name=label, - value=str(config.autoconvert_dir) if config.autoconvert_dir else None, + value=str(config.autoimport_dir) if config.autoimport_dir else None, select_dir=True, must_exist=True, use_two_lines=False, @@ -336,7 +336,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): autoscan_on_startup = self.add_widget_intelligent( npyscreen.Checkbox, name="Scan and import from this directory each time InvokeAI starts", - value=config.autoconvert_dir is not None, + value=config.autoimport_dir is not None, relx=4, scroll_exit=True, ) From 414a04774c79f980fb20a9a7da4d9d738737c36a Mon Sep 17 00:00:00 2001 From: user1 Date: Sun, 25 Jun 2023 14:19:55 -0700 Subject: [PATCH 25/47] Added LeReS ControlNet image preprocessor. 
--- .../controlnet_image_processors.py | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index d7825111d3..f573c17c0d 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -30,6 +30,7 @@ from controlnet_aux import ( ZoeDetector, MediapipeFaceDetector, SamDetector, + LeresDetector, ) from controlnet_aux.util import ade_palette @@ -460,6 +461,27 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation, PILInvocationCo processed_image = mediapipe_face_processor(image, max_faces=self.max_faces, min_confidence=self.min_confidence) return processed_image +class LeresImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig): + """Applies leres processing to image""" + # fmt: off + type: Literal["leres_image_processor"] = "leres_image_processor" + # Inputs + thr_a: float = Field(default=0, description="Leres parameter `thr_a`") + thr_b: float = Field(default=0, description="Leres parameter `thr_b`") + boost: bool = Field(default=False, description="Whether to use boost mode") + detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection") + image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image") + # fmt: on + + def run_processor(self, image): + leres_processor = LeresDetector.from_pretrained("lllyasviel/Annotators") + processed_image = leres_processor(image, + thr_a=self.thr_a, + thr_b=self.thr_b, + boost=self.boost, + detect_resolution=self.detect_resolution, + image_resolution=self.image_resolution) + return processed_image class SegmentAnythingProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig): """Applies segment anything processing to image""" From 45aa338a9814a3df1a97efd02b5d45553c5b87ab Mon Sep 17 00:00:00 2001 From: user1 Date: Sun, 25 Jun 2023 14:22:34 -0700 Subject: [PATCH 26/47] Changed pyproject.toml to require controlnet_aux >= 0.0.5 (to enable use of SAM ControlNet preprocessor) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 03396312ac..d470b76937 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ dependencies = [ "click", "clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", "compel>=1.2.1", - "controlnet-aux>=0.0.4", + "controlnet-aux>=0.0.5", "timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26 "datasets", "diffusers[torch]~=0.17.1", From 10e8389fa4ab702899264c88c0613bed95e06cb0 Mon Sep 17 00:00:00 2001 From: user1 Date: Sun, 25 Jun 2023 14:25:14 -0700 Subject: [PATCH 27/47] Commenting out LeReS ControlNet image preprocessor until release of controlnet_aux v0.0.6 (supported on controlnet_aux current main, but not on latest release v0.0.5) --- .../controlnet_image_processors.py | 44 +++++++++---------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index f573c17c0d..2bd0a5cf04 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -30,7 +30,7 @@ from controlnet_aux import ( ZoeDetector, MediapipeFaceDetector, 
SamDetector, - LeresDetector, + # LeresDetector, ) from controlnet_aux.util import ade_palette @@ -461,27 +461,27 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation, PILInvocationCo processed_image = mediapipe_face_processor(image, max_faces=self.max_faces, min_confidence=self.min_confidence) return processed_image -class LeresImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig): - """Applies leres processing to image""" - # fmt: off - type: Literal["leres_image_processor"] = "leres_image_processor" - # Inputs - thr_a: float = Field(default=0, description="Leres parameter `thr_a`") - thr_b: float = Field(default=0, description="Leres parameter `thr_b`") - boost: bool = Field(default=False, description="Whether to use boost mode") - detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection") - image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image") - # fmt: on - - def run_processor(self, image): - leres_processor = LeresDetector.from_pretrained("lllyasviel/Annotators") - processed_image = leres_processor(image, - thr_a=self.thr_a, - thr_b=self.thr_b, - boost=self.boost, - detect_resolution=self.detect_resolution, - image_resolution=self.image_resolution) - return processed_image +# class LeresImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig): +# """Applies leres processing to image""" +# # fmt: off +# type: Literal["leres_image_processor"] = "leres_image_processor" +# # Inputs +# thr_a: float = Field(default=0, description="Leres parameter `thr_a`") +# thr_b: float = Field(default=0, description="Leres parameter `thr_b`") +# boost: bool = Field(default=False, description="Whether to use boost mode") +# detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection") +# image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image") +# # fmt: on +# +# def run_processor(self, image): +# leres_processor = LeresDetector.from_pretrained("lllyasviel/Annotators") +# processed_image = leres_processor(image, +# thr_a=self.thr_a, +# thr_b=self.thr_b, +# boost=self.boost, +# detect_resolution=self.detect_resolution, +# image_resolution=self.image_resolution) +# return processed_image class SegmentAnythingProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig): """Applies segment anything processing to image""" From 160b5d79928c38f12f0ca9e0ca4c358bbf57c1cf Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Sun, 25 Jun 2023 18:50:15 -0400 Subject: [PATCH 28/47] add support for an autoimport models directory scanned at startup time --- invokeai/app/services/config.py | 10 +- .../backend/install/invokeai_configure.py | 1 + .../backend/install/model_install_backend.py | 135 ++++++++++-------- .../backend/model_management/model_manager.py | 109 +++++++++----- invokeai/backend/util/__init__.py | 1 + invokeai/backend/util/util.py | 15 ++ invokeai/frontend/install/model_install.py | 10 +- 7 files changed, 177 insertions(+), 104 deletions(-) diff --git a/invokeai/app/services/config.py b/invokeai/app/services/config.py index 113f8b4a8e..232cbe7932 100644 --- a/invokeai/app/services/config.py +++ b/invokeai/app/services/config.py @@ -15,7 +15,7 @@ InvokeAI: conf_path: configs/models.yaml legacy_conf_dir: configs/stable-diffusion outdir: outputs - autoconvert_dir: null + autoimport_dir: null Models: model: stable-diffusion-1.5 embeddings: true @@ -367,17 +367,17 @@ setting 
environment variables INVOKEAI_. always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", category='Memory/Performance') free_gpu_mem : bool = Field(default=False, description="If true, purge model from GPU after each generation.", category='Memory/Performance') - max_loaded_models : int = Field(default=2, gt=0, description="Maximum number of models to keep in memory for rapid switching", category='Memory/Performance') + max_loaded_models : int = Field(default=3, gt=0, description="Maximum number of models to keep in memory for rapid switching", category='Memory/Performance') precision : Literal[tuple(['auto','float16','float32','autocast'])] = Field(default='float16',description='Floating point precision', category='Memory/Performance') sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", category='Memory/Performance') xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", category='Memory/Performance') tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category='Memory/Performance') root : Path = Field(default=_find_root(), description='InvokeAI runtime root directory', category='Paths') - autoimport_dir : Path = Field(default='models/autoimport', description='Path to a directory of models files to be imported on startup.', category='Paths') - autoconvert_dir : Path = Field(default=None, description='Deprecated configuration option.', category='Paths') + autoimport_dir : Path = Field(default='autoimport', description='Path to a directory of models files to be imported on startup.', category='Paths') + autoconvert_dir : Path = Field(default=None, description='Deprecated configuration option.', category='Paths') conf_path : Path = Field(default='configs/models.yaml', description='Path to models definition file', category='Paths') - models_dir : Path = Field(default='./models', description='Path to the models directory', category='Paths') + models_dir : Path = Field(default='models', description='Path to the models directory', category='Paths') legacy_conf_dir : Path = Field(default='configs/stable-diffusion', description='Path to directory of legacy checkpoint config files', category='Paths') db_dir : Path = Field(default='databases', description='Path to InvokeAI databases directory', category='Paths') outdir : Path = Field(default='outputs', description='Default folder for output images', category='Paths') diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py index 5f9612fcea..b8c171f526 100755 --- a/invokeai/backend/install/invokeai_configure.py +++ b/invokeai/backend/install/invokeai_configure.py @@ -605,6 +605,7 @@ def initialize_rootdir(root: Path, yes_to_all: bool = False): for name in ( "models", "databases", + "autoimport", "text-inversion-output", "text-inversion-training-data", "configs" diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index 13283f023b..dcc0eac902 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -183,61 +183,67 @@ class ModelInstall(object): else: update_autoimport_dir(None) - def heuristic_install(self, model_path_id_or_url: Union[str,Path]): + def 
heuristic_install(self, + model_path_id_or_url: Union[str,Path], + models_installed: Set[Path]=None)->Set[Path]: + + if not models_installed: + models_installed = set() + # A little hack to allow nested routines to retrieve info on the requested ID self.current_id = model_path_id_or_url - path = Path(model_path_id_or_url) - # checkpoint file, or similar - if path.is_file(): - self._install_path(path) - return + try: + # checkpoint file, or similar + if path.is_file(): + models_installed.add(self._install_path(path)) - # folders style or similar - if path.is_dir() and any([(path/x).exists() for x in ['config.json','model_index.json','learned_embeds.bin']]): - self._install_path(path) - return + # folders style or similar + elif path.is_dir() and any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]): + models_installed.add(self._install_path(path)) - # recursive scan - if path.is_dir(): - for child in path.iterdir(): - self.heuristic_install(child) - return + # recursive scan + elif path.is_dir(): + for child in path.iterdir(): + self.heuristic_install(child, models_installed=models_installed) - # huggingface repo - parts = str(path).split('/') - if len(parts) == 2: - self._install_repo(str(path)) - return + # huggingface repo + elif len(str(path).split('/')) == 2: + models_installed.add(self._install_repo(str(path))) - # a URL - if model_path_id_or_url.startswith(("http:", "https:", "ftp:")): - self._install_url(model_path_id_or_url) - return + # a URL + elif model_path_id_or_url.startswith(("http:", "https:", "ftp:")): + models_installed.add(self._install_url(model_path_id_or_url)) - logger.warning(f'{str(model_path_id_or_url)} is not recognized as a local path, repo ID or URL. Skipping') + else: + logger.warning(f'{str(model_path_id_or_url)} is not recognized as a local path, repo ID or URL. Skipping') + + except ValueError as e: + logger.error(str(e)) + + return models_installed # install a model from a local path. The optional info parameter is there to prevent # the model from being probed twice in the event that it has already been probed. 
- def _install_path(self, path: Path, info: ModelProbeInfo=None): + def _install_path(self, path: Path, info: ModelProbeInfo=None)->Path: try: logger.info(f'Probing {path}') info = info or ModelProbe().heuristic_probe(path,self.prediction_helper) - if info.model_type == ModelType.Main: - model_name = path.stem if info.format=='checkpoint' else path.name - if self.mgr.model_exists(model_name, info.base_type, info.model_type): - raise Exception(f'A model named "{model_name}" is already installed.') - attributes = self._make_attributes(path,info) - self.mgr.add_model(model_name = model_name, - base_model = info.base_type, - model_type = info.model_type, - model_attributes = attributes - ) + model_name = path.stem if info.format=='checkpoint' else path.name + if self.mgr.model_exists(model_name, info.base_type, info.model_type): + raise ValueError(f'A model named "{model_name}" is already installed.') + attributes = self._make_attributes(path,info) + self.mgr.add_model(model_name = model_name, + base_model = info.base_type, + model_type = info.model_type, + model_attributes = attributes + ) except Exception as e: logger.warning(f'{str(e)} Skipping registration.') + return path - def _install_url(self, url: str): + def _install_url(self, url: str)->Path: # copy to a staging area, probe, import and delete with TemporaryDirectory(dir=self.config.models_path) as staging: location = download_with_resume(url,Path(staging)) @@ -248,19 +254,9 @@ class ModelInstall(object): models_path = shutil.move(location,dest) # staged version will be garbage-collected at this time - self._install_path(Path(models_path), info) + return self._install_path(Path(models_path), info) - def _get_model_name(self,path_name: str, location: Path)->str: - ''' - Calculate a name for the model - primitive implementation. - ''' - if key := self.reverse_paths.get(path_name): - (name, base, mtype) = ModelManager.parse_key(key) - return name - else: - return location.stem - - def _install_repo(self, repo_id: str): + def _install_repo(self, repo_id: str)->Path: hinfo = HfApi().model_info(repo_id) # we try to figure out how to download this most economically @@ -300,7 +296,17 @@ class ModelInstall(object): if dest.exists(): shutil.rmtree(dest) shutil.copytree(location,dest) - self._install_path(dest, info) + return self._install_path(dest, info) + + def _get_model_name(self,path_name: str, location: Path)->str: + ''' + Calculate a name for the model - primitive implementation. 
+ ''' + if key := self.reverse_paths.get(path_name): + (name, base, mtype) = ModelManager.parse_key(key) + return name + else: + return location.stem def _make_attributes(self, path: Path, info: ModelProbeInfo)->dict: # convoluted way to retrieve the description from datasets @@ -308,9 +314,11 @@ class ModelInstall(object): if key := self.reverse_paths.get(self.current_id): if key in self.datasets: description = self.datasets[key]['description'] - + + rel_path = self.relative_to_root(path) + attributes = dict( - path = str(path), + path = str(rel_path), description = str(description), model_format = info.format, ) @@ -318,18 +326,30 @@ class ModelInstall(object): attributes.update(dict(variant = info.variant_type,)) if info.format=="checkpoint": try: - legacy_conf = LEGACY_CONFIGS[info.base_type][info.variant_type][info.prediction_type] if info.base_type == BaseModelType.StableDiffusion2 \ - else LEGACY_CONFIGS[info.base_type][info.variant_type] + possible_conf = path.with_suffix('.yaml') + if possible_conf.exists(): + legacy_conf = str(self.relative_to_root(possible_conf)) + elif info.base_type == BaseModelType.StableDiffusion2: + legacy_conf = Path(self.config.legacy_conf_dir, LEGACY_CONFIGS[info.base_type][info.variant_type][info.prediction_type]) + else: + legacy_conf = Path(self.config.legacy_conf_dir, LEGACY_CONFIGS[info.base_type][info.variant_type]) except KeyError: - legacy_conf = 'v1-inference.yaml' # best guess + legacy_conf = Path(self.config.legacy_conf_dir, 'v1-inference.yaml') # best guess attributes.update( dict( - config = str(self.config.legacy_conf_path / legacy_conf) + config = str(legacy_conf) ) ) return attributes + def relative_to_root(self, path: Path)->Path: + root = self.config.root_path + if path.is_relative_to(root): + return path.relative_to(root) + else: + return path + def _download_hf_pipeline(self, repo_id: str, staging: Path)->Path: ''' This retrieves a StableDiffusion model from cache or remote and then @@ -379,6 +399,9 @@ def update_autoimport_dir(autodir: Path): ''' Update the "autoimport_dir" option in invokeai.yaml ''' + with open('log.txt','a') as f: + print(f'autodir = {autodir}',file=f) + invokeai_config_path = config.init_file_path conf = OmegaConf.load(invokeai_config_path) conf.InvokeAI.Paths.autoimport_dir = str(autodir) if autodir else None diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index fb5c0f9a9c..5fcb13a48e 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -227,7 +227,7 @@ from pydantic import BaseModel import invokeai.backend.util.logging as logger from invokeai.app.services.config import InvokeAIAppConfig -from invokeai.backend.util import CUDA_DEVICE +from invokeai.backend.util import CUDA_DEVICE, Chdir from .model_cache import ModelCache, ModelLocker from .models import ( BaseModelType, ModelType, SubModelType, @@ -488,11 +488,6 @@ class ModelManager(object): ) -> list[dict]: """ Return a list of models. 
- - Please use model_manager.models() to get all the model names, - model_manager.model_info('model-name') to get the stanza for the model - named 'model-name', and model_manager.config to get the full OmegaConf - object derived from models.yaml """ models = [] @@ -659,44 +654,82 @@ class ModelManager(object): def scan_models_directory(self): loaded_files = set() new_models_found = False - - for model_key, model_config in list(self.models.items()): - model_name, base_model, model_type = self.parse_key(model_key) - model_path = str(self.globals.root_path / model_config.path) - if not os.path.exists(model_path): - model_class = MODEL_CLASSES[base_model][model_type] - if model_class.save_to_config: - model_config.error = ModelError.NotFound + + with Chdir(self.globals.root_path): + for model_key, model_config in list(self.models.items()): + model_name, base_model, model_type = self.parse_key(model_key) + model_path = str(model_config.path) + if not os.path.exists(model_path): + model_class = MODEL_CLASSES[base_model][model_type] + if model_class.save_to_config: + model_config.error = ModelError.NotFound + else: + self.models.pop(model_key, None) else: - self.models.pop(model_key, None) - else: - loaded_files.add(model_path) + loaded_files.add(model_path) - for base_model in BaseModelType: - for model_type in ModelType: - model_class = MODEL_CLASSES[base_model][model_type] - models_dir = os.path.join(self.globals.models_path, base_model, model_type) + for base_model in BaseModelType: + for model_type in ModelType: + model_class = MODEL_CLASSES[base_model][model_type] + models_dir = os.path.join(self.globals.models_dir, base_model, model_type) - if not os.path.exists(models_dir): - continue # TODO: or create all folders? - - for entry_name in os.listdir(models_dir): - model_path = os.path.join(models_dir, entry_name) - if model_path not in loaded_files: # TODO: check - model_path = Path(model_path) - model_name = model_path.name if model_path.is_dir() else model_path.stem - model_key = self.create_key(model_name, base_model, model_type) + if not os.path.exists(models_dir): + continue # TODO: or create all folders? - if model_key in self.models: - raise Exception(f"Model with key {model_key} added twice") + for entry_name in os.listdir(models_dir): + model_path = os.path.join(models_dir, entry_name) + if model_path not in loaded_files: # TODO: check + model_path = Path(model_path) + model_name = model_path.name if model_path.is_dir() else model_path.stem + model_key = self.create_key(model_name, base_model, model_type) - model_config: ModelConfigBase = model_class.probe_config(str(model_path)) - self.models[model_key] = model_config - new_models_found = True + if model_key in self.models: + raise Exception(f"Model with key {model_key} added twice") - if new_models_found and self.config_path: + model_config: ModelConfigBase = model_class.probe_config(str(model_path)) + self.models[model_key] = model_config + new_models_found = True + + imported_models = self.autoimport() + + if (new_models_found or imported_models) and self.config_path: self.commit() + def autoimport(self): + ''' + Scan the autoimport directory (if defined) and import new models, delete defunct models. 
+ ''' + # avoid circular import + from invokeai.backend.install.model_install_backend import ModelInstall + installer = ModelInstall(config = self.globals, + model_manager = self) + + installed = set() + if not self.globals.autoimport_dir: + return installed + + autodir = self.globals.root_path / self.globals.autoimport_dir + if not (autodir and autodir.exists()): + return installed + + known_paths = {(self.globals.root_path / x['path']).resolve() for x in self.list_models()} + scanned_dirs = set() + for root, dirs, files in os.walk(autodir): + for d in dirs: + path = Path(root) / d + if path in known_paths: + continue + if any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]): + installed.update(installer.heuristic_install(path)) + scanned_dirs.add(path) + + for f in files: + path = Path(root) / f + if path in known_paths or path.parent in scanned_dirs: + continue + if path.suffix in {'.ckpt','.bin','.pth','.safetensors'}: + installed.update(installer.heuristic_install(path)) + return installed def heuristic_import(self, items_to_import: Set[str], @@ -724,8 +757,8 @@ class ModelManager(object): model_manager = self) for thing in items_to_import: try: - installer.heuristic_install(thing) - successfully_installed.add(thing) + installed = installer.heuristic_install(thing) + successfully_installed.update(installed) except Exception as e: self.logger.warning(f'{thing} could not be imported: {str(e)}') diff --git a/invokeai/backend/util/__init__.py b/invokeai/backend/util/__init__.py index 84720b1854..fadeff4d75 100644 --- a/invokeai/backend/util/__init__.py +++ b/invokeai/backend/util/__init__.py @@ -16,6 +16,7 @@ from .util import ( download_with_resume, instantiate_from_config, url_attachment_name, + Chdir ) diff --git a/invokeai/backend/util/util.py b/invokeai/backend/util/util.py index 9c6a61a4fa..1cc632e483 100644 --- a/invokeai/backend/util/util.py +++ b/invokeai/backend/util/util.py @@ -381,3 +381,18 @@ def image_to_dataURL(image: Image.Image, image_format: str = "PNG") -> str: buffered.getvalue() ).decode("UTF-8") return image_base64 + +class Chdir(object): + '''Context manager to chdir to desired directory and change back after context exits: + Args: + path (Path): The path to the cwd + ''' + def __init__(self, path: Path): + self.path = path + self.original = Path().absolute() + + def __enter__(self): + os.chdir(self.path) + + def __exit__(self,*args): + os.chdir(self.original) diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index a8d7f53940..183be03173 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -65,8 +65,8 @@ def make_printable(s:str)->str: return s.translate(NOPRINT_TRANS_TABLE) class addModelsForm(CyclingForm, npyscreen.FormMultiPage): - # for responsive resizing - disabled - # FIX_MINIMUM_SIZE_WHEN_CREATED = False + # for responsive resizing set to False, but this seems to cause a crash! 
+ FIX_MINIMUM_SIZE_WHEN_CREATED = True # for persistence current_tab = 0 @@ -323,7 +323,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): FileBox, max_height=3, name=label, - value=str(config.autoimport_dir) if config.autoimport_dir else None, + value=str(config.root_path / config.autoimport_dir) if config.autoimport_dir else None, select_dir=True, must_exist=True, use_two_lines=False, @@ -501,7 +501,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): # rebuild the form, saving and restoring some of the fields that need to be preserved. saved_messages = self.monitor.entry_widget.values - autoload_dir = self.pipeline_models['autoload_directory'].value + autoload_dir = str(config.root_path / self.pipeline_models['autoload_directory'].value) autoscan = self.pipeline_models['autoscan_on_startup'].value app.main_form = app.addForm( @@ -547,7 +547,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): # load directory and whether to scan on startup if self.parentApp.autoload_pending: - selections.scan_directory = self.pipeline_models['autoload_directory'].value + selections.scan_directory = str(config.root_path / self.pipeline_models['autoload_directory'].value) self.parentApp.autoload_pending = False selections.autoscan_on_startup = self.pipeline_models['autoscan_on_startup'].value From 23c22ac933a957dbaf1c78e6adf4636f7b56e875 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Mon, 26 Jun 2023 03:07:54 +0300 Subject: [PATCH 29/47] Refactor logic/small fixes --- .../backend/model_management/model_manager.py | 127 ++++++++++-------- 1 file changed, 69 insertions(+), 58 deletions(-) diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 5fcb13a48e..9cc940c9b7 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -311,7 +311,7 @@ class ModelManager(object): self.models[model_key] = model_class.create_config(**model_config) # check config version number and update on disk/RAM if necessary - self.globals = InvokeAIAppConfig.get_config() + self.app_config = InvokeAIAppConfig.get_config() self.logger = logger self.cache = ModelCache( max_cache_size=max_cache_size, @@ -362,6 +362,9 @@ class ModelManager(object): return (model_name, base_model, model_type) + def _get_model_cache_path(self, model_path): + return self.app_config.models_path / ".cache" / hashlib.md5(str(model_path).encode()).hexdigest() + def get_model( self, model_name: str, @@ -382,37 +385,21 @@ class ModelManager(object): # if model not found try to find it (maybe file just pasted) if model_key not in self.models: - # TODO: find by mask or try rescan? - path_mask = f"/models/{base_model}/{model_type}/{model_name}*" - if False: # model_path = next(find_by_mask(path_mask)): - model_path = None # TODO: - model_config = model_class.probe_config(model_path) - self.models[model_key] = model_config - else: + self.scan_models_directory(base_model=base_model, model_type=model_type) + if model_key not in self.models: raise Exception(f"Model not found - {model_key}") - # if it known model check that target path exists (if manualy deleted) - else: - # logic repeated twice(in rescan too) any way to optimize? 
- model_path = self.globals.root_path / self.models[model_key].path - if not model_path.exists(): - if model_class.save_to_config: - self.models[model_key].error = ModelError.NotFound - raise Exception(f"Files for model \"{model_key}\" not found") - - else: - self.models.pop(model_key, None) - raise Exception(f"Model not found - {model_key}") - - # reset model errors? - model_config = self.models[model_key] + model_path = self.app_config.root_path / model_config.path - # /models/{base_model}/{model_type}/{name}.ckpt or .safentesors - # /models/{base_model}/{model_type}/{name}/ - # massage relative paths into absolute ones - model_path = model_path or self.globals.root_path / model_config.path - model_config.path = model_path + if not model_path.exists(): + if model_class.save_to_config: + self.models[model_key].error = ModelError.NotFound + raise Exception(f"Files for model \"{model_key}\" not found") + + else: + self.models.pop(model_key, None) + raise Exception(f"Model not found - {model_key}") # vae/movq override # TODO: @@ -426,7 +413,7 @@ class ModelManager(object): # TODO: path # TODO: is it accurate to use path as id - dst_convert_path = self.globals.models_dir / ".cache" / hashlib.md5(str(model_path).encode()).hexdigest() + dst_convert_path = self._get_model_cache_path(model_path) model_path = model_class.convert_if_required( base_model=base_model, model_path=model_path, @@ -547,9 +534,12 @@ class ModelManager(object): self.cache.uncache_model(cache_id) # if model inside invoke models folder - delete files - model_path = self.globals.root_path / model_cfg.path + model_path = self.app_config.root_path / model_cfg.path + cache_path = self._get_model_cache_path(model_path) + if cache_path.exists(): + rmtree(str(cache_path)) - if model_path.is_relative_to(self.globals.models_path): + if model_path.is_relative_to(self.app_config.models_path): if model_path.is_dir(): rmtree(str(model_path)) else: @@ -576,18 +566,30 @@ class ModelManager(object): model_config = model_class.create_config(**model_attributes) model_key = self.create_key(model_name, base_model, model_type) - assert ( - clobber or model_key not in self.models - ), f'attempt to overwrite existing model definition "{model_key}"' + if clobber or model_key not in self.models: + raise Exception(f'Attempt to overwrite existing model definition "{model_key}"') - self.models[model_key] = model_config - - if clobber and model_key in self.cache_keys: + old_model = self.models.pop(model_key, False) + if old_model is not None: + # TODO: if path changed and old_model.path inside models folder should we delete this too? 
+ + # remove conversion cache as config changed + old_model_path = self.app_config.root_path / old_model.path + old_model_cache = self._get_model_cache_path(old_model_path) + if old_model_cache.exists(): + if old_model_cache.is_dir(): + rmtree(str(old_model_cache)) + else: + old_model_cache.unlink() + + # remove in-memory cache # note: it not garantie to release memory(model can has other references) cache_ids = self.cache_keys.pop(model_key, []) for cache_id in cache_ids: self.cache.uncache_model(cache_id) + self.models[model_key] = model_config + def search_models(self, search_folder): self.logger.info(f"Finding Models In: {search_folder}") models_folder_ckpt = Path(search_folder).glob("**/*.ckpt") @@ -628,7 +630,7 @@ class ModelManager(object): yaml_str = OmegaConf.to_yaml(data_to_save) config_file_path = conf_file or self.config_path assert config_file_path is not None,'no config file path to write to' - config_file_path = self.globals.root_dir / config_file_path + config_file_path = self.app_config.root_path / config_file_path tmpfile = os.path.join(os.path.dirname(config_file_path), "new_config.tmp") with open(tmpfile, "w", encoding="utf-8") as outfile: outfile.write(self.preamble()) @@ -651,16 +653,20 @@ class ModelManager(object): """ ) - def scan_models_directory(self): + def scan_models_directory( + self, + base_model: Optional[BaseModelType] = None, + model_type: Optional[ModelType] = None, + ): loaded_files = set() new_models_found = False - with Chdir(self.globals.root_path): + with Chdir(self.app_config.root_path): for model_key, model_config in list(self.models.items()): - model_name, base_model, model_type = self.parse_key(model_key) - model_path = str(model_config.path) - if not os.path.exists(model_path): - model_class = MODEL_CLASSES[base_model][model_type] + model_name, cur_base_model, cur_model_type = self.parse_key(model_key) + model_path = self.app_config.root_path / model_config.path + if not model_path.exists(): + model_class = MODEL_CLASSES[cur_base_model][cur_model_type] if model_class.save_to_config: model_config.error = ModelError.NotFound else: @@ -668,24 +674,29 @@ class ModelManager(object): else: loaded_files.add(model_path) - for base_model in BaseModelType: - for model_type in ModelType: - model_class = MODEL_CLASSES[base_model][model_type] - models_dir = os.path.join(self.globals.models_dir, base_model, model_type) + for cur_base_model in BaseModelType: + if base_model is not None and cur_base_model != base_model: + continue - if not os.path.exists(models_dir): + for cur_model_type in ModelType: + if model_type is not None and cur_model_type != model_type: + continue + model_class = MODEL_CLASSES[cur_base_model][cur_model_type] + models_dir = self.app_config.models_path / cur_base_model.value / cur_model_type.value + + if not models_dir.exists(): continue # TODO: or create all folders? 
- for entry_name in os.listdir(models_dir): - model_path = os.path.join(models_dir, entry_name) + for model_path in models_dir.iterdir(): if model_path not in loaded_files: # TODO: check - model_path = Path(model_path) model_name = model_path.name if model_path.is_dir() else model_path.stem - model_key = self.create_key(model_name, base_model, model_type) + model_key = self.create_key(model_name, cur_base_model, cur_model_type) if model_key in self.models: raise Exception(f"Model with key {model_key} added twice") + if model_path.is_relative_to(self.app_config.root_path): + model_path = model_path.relative_to(self.app_config.root_path) model_config: ModelConfigBase = model_class.probe_config(str(model_path)) self.models[model_key] = model_config new_models_found = True @@ -701,18 +712,18 @@ class ModelManager(object): ''' # avoid circular import from invokeai.backend.install.model_install_backend import ModelInstall - installer = ModelInstall(config = self.globals, + installer = ModelInstall(config = self.app_config, model_manager = self) installed = set() - if not self.globals.autoimport_dir: + if not self.app_config.autoimport_dir: return installed - autodir = self.globals.root_path / self.globals.autoimport_dir + autodir = self.app_config.root_path / self.app_config.autoimport_dir if not (autodir and autodir.exists()): return installed - known_paths = {(self.globals.root_path / x['path']).resolve() for x in self.list_models()} + known_paths = {(self.app_config.root_path / x['path']).resolve() for x in self.list_models()} scanned_dirs = set() for root, dirs, files in os.walk(autodir): for d in dirs: @@ -752,7 +763,7 @@ class ModelManager(object): from invokeai.backend.install.model_install_backend import ModelInstall successfully_installed = set() - installer = ModelInstall(config = self.globals, + installer = ModelInstall(config = self.app_config, prediction_type_helper = prediction_type_helper, model_manager = self) for thing in items_to_import: From 1ba94a92b3b3d11faf8ae8c7ca487dfe1b59ee20 Mon Sep 17 00:00:00 2001 From: Sergey Borisov Date: Mon, 26 Jun 2023 03:54:42 +0300 Subject: [PATCH 30/47] Fixes --- invokeai/backend/model_management/model_manager.py | 4 ++-- invokeai/backend/model_management/models/stable_diffusion.py | 4 ---- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 9cc940c9b7..74b8ac493b 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -416,7 +416,7 @@ class ModelManager(object): dst_convert_path = self._get_model_cache_path(model_path) model_path = model_class.convert_if_required( base_model=base_model, - model_path=model_path, + model_path=str(model_path), # TODO: refactor str/Path types logic output_path=dst_convert_path, config=model_config, ) @@ -569,7 +569,7 @@ class ModelManager(object): if clobber or model_key not in self.models: raise Exception(f'Attempt to overwrite existing model definition "{model_key}"') - old_model = self.models.pop(model_key, False) + old_model = self.models.pop(model_key, None) if old_model is not None: # TODO: if path changed and old_model.path inside models folder should we delete this too? 
diff --git a/invokeai/backend/model_management/models/stable_diffusion.py b/invokeai/backend/model_management/models/stable_diffusion.py index f5112dfebb..68a2066fc5 100644 --- a/invokeai/backend/model_management/models/stable_diffusion.py +++ b/invokeai/backend/model_management/models/stable_diffusion.py @@ -111,8 +111,6 @@ class StableDiffusion1Model(DiffusersModel): config: ModelConfigBase, base_model: BaseModelType, ) -> str: - assert model_path == config.path - if isinstance(config, cls.CheckpointConfig): return _convert_ckpt_and_cache( version=BaseModelType.StableDiffusion1, @@ -215,8 +213,6 @@ class StableDiffusion2Model(DiffusersModel): config: ModelConfigBase, base_model: BaseModelType, ) -> str: - assert model_path == config.path - if isinstance(config, cls.CheckpointConfig): return _convert_ckpt_and_cache( version=BaseModelType.StableDiffusion2, From 47e651225d1215df52fc5f72a4496797bfc32cc8 Mon Sep 17 00:00:00 2001 From: Eugene Brodsky Date: Mon, 26 Jun 2023 01:30:21 -0400 Subject: [PATCH 31/47] query for 'main' model type when populating UI lists to support renaming of 'pipeline' models to 'main' --- invokeai/frontend/web/src/app/components/App.tsx | 2 +- .../nodes/components/fields/ModelInputFieldComponent.tsx | 2 +- .../frontend/web/src/features/system/components/ModelSelect.tsx | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/invokeai/frontend/web/src/app/components/App.tsx b/invokeai/frontend/web/src/app/components/App.tsx index 3f1f2cf7a6..c93bd8791c 100644 --- a/invokeai/frontend/web/src/app/components/App.tsx +++ b/invokeai/frontend/web/src/app/components/App.tsx @@ -48,7 +48,7 @@ const App = ({ const isApplicationReady = useIsApplicationReady(); const { data: pipelineModels } = useListModelsQuery({ - model_type: 'pipeline', + model_type: 'main', }); const { data: controlnetModels } = useListModelsQuery({ model_type: 'controlnet', diff --git a/invokeai/frontend/web/src/features/nodes/components/fields/ModelInputFieldComponent.tsx b/invokeai/frontend/web/src/features/nodes/components/fields/ModelInputFieldComponent.tsx index 895d763882..741662655f 100644 --- a/invokeai/frontend/web/src/features/nodes/components/fields/ModelInputFieldComponent.tsx +++ b/invokeai/frontend/web/src/features/nodes/components/fields/ModelInputFieldComponent.tsx @@ -23,7 +23,7 @@ const ModelInputFieldComponent = ( const { t } = useTranslation(); const { data: pipelineModels } = useListModelsQuery({ - model_type: 'pipeline', + model_type: 'main', }); const data = useMemo(() => { diff --git a/invokeai/frontend/web/src/features/system/components/ModelSelect.tsx b/invokeai/frontend/web/src/features/system/components/ModelSelect.tsx index 916f70ef9a..f9eda624f2 100644 --- a/invokeai/frontend/web/src/features/system/components/ModelSelect.tsx +++ b/invokeai/frontend/web/src/features/system/components/ModelSelect.tsx @@ -24,7 +24,7 @@ const ModelSelect = () => { ); const { data: pipelineModels } = useListModelsQuery({ - model_type: 'pipeline', + model_type: 'main', }); const data = useMemo(() => { From 873c18bc4b6b1645e729bb920c9e966f6008042a Mon Sep 17 00:00:00 2001 From: user1 Date: Mon, 26 Jun 2023 04:27:26 -0700 Subject: [PATCH 32/47] Added TileResampler ControlNet preprocessor node. Also fixes to SegmentAnything ControlNet preprocessor node. 
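Editor's note: as a rough standalone sketch of the down-sampling this new node performs (assuming an RGB uint8 numpy array and omitting the HWC3 channel normalization from controlnet_aux that the real node applies), the snippet below mirrors the resize logic added in the diff that follows; tile_resample_sketch is an illustrative name, not a function in this repository.

    import cv2
    import numpy as np

    def tile_resample_sketch(np_img: np.ndarray, down_sampling_rate: float = 2.0) -> np.ndarray:
        # Rates close to 1.0 are treated as "no resampling", matching the node's threshold.
        if down_sampling_rate < 1.1:
            return np_img
        height, width = np_img.shape[:2]
        new_size = (int(width / down_sampling_rate), int(height / down_sampling_rate))
        # INTER_AREA is the usual choice when shrinking an image, to avoid aliasing.
        return cv2.resize(np_img, new_size, interpolation=cv2.INTER_AREA)

    # Example: halve the resolution of a control image before the tile ControlNet sees it.
    # img = np.array(Image.open("control.png").convert("RGB"), dtype=np.uint8)
    # small = tile_resample_sketch(img, down_sampling_rate=2.0)
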
--- .../controlnet_image_processors.py | 45 +++++++++++++++++-- 1 file changed, 42 insertions(+), 3 deletions(-) diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index 2bd0a5cf04..870f14dc27 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -1,8 +1,9 @@ -# InvokeAI nodes for ControlNet image preprocessors +# Invocations for ControlNet image preprocessors # initial implementation by Gregg Helt, 2023 # heavily leverages controlnet_aux package: https://github.com/patrickvonplaten/controlnet_aux from builtins import float, bool +import cv2 import numpy as np from typing import Literal, Optional, Union, List, Dict from PIL import Image, ImageFilter, ImageOps @@ -33,7 +34,7 @@ from controlnet_aux import ( # LeresDetector, ) -from controlnet_aux.util import ade_palette +from controlnet_aux.util import HWC3, ade_palette from .image import ImageOutput, PILInvocationConfig @@ -483,6 +484,43 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation, PILInvocationCo # image_resolution=self.image_resolution) # return processed_image + +class TileResamplerProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig): + + # fmt: off + type: Literal["tile_image_processor"] = "tile_image_processor" + # Inputs + #res: int = Field(default=512, ge=0, le=1024, description="The pixel resolution for each tile") + down_sampling_rate: float = Field(default=1.0, ge=1.0, le=8.0, description="Down sampling rate") + # fmt: on + + # tile_resample copied from sd-webui-controlnet/scripts/processor.py + def tile_resample(self, + np_img: np.ndarray, + res=512, # never used? + down_sampling_rate=1.0, + ): + np_img = HWC3(np_img) + if down_sampling_rate < 1.1: + return np_img + H, W, C = np_img.shape + H = int(float(H) / float(down_sampling_rate)) + W = int(float(W) / float(down_sampling_rate)) + np_img = cv2.resize(np_img, (W, H), interpolation=cv2.INTER_AREA) + return np_img + + def run_processor(self, img): + np_img = np.array(img, dtype=np.uint8) + processed_np_image = self.tile_resample(np_img, + #res=self.tile_size, + down_sampling_rate=self.down_sampling_rate + ) + processed_image = Image.fromarray(processed_np_image) + return processed_image + + + + class SegmentAnythingProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig): """Applies segment anything processing to image""" # fmt: off @@ -492,7 +530,8 @@ class SegmentAnythingProcessorInvocation(ImageProcessorInvocation, PILInvocation def run_processor(self, image): # segment_anything_processor = SamDetector.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints") segment_anything_processor = SamDetectorReproducibleColors.from_pretrained("ybelkada/segment-anything", subfolder="checkpoints") - processed_image = segment_anything_processor(image) + np_img = np.array(image, dtype=np.uint8) + processed_image = segment_anything_processor(np_img) return processed_image class SamDetectorReproducibleColors(SamDetector): From af566adf566402b8522a92562b1e7a73891d1649 Mon Sep 17 00:00:00 2001 From: user1 Date: Mon, 26 Jun 2023 04:29:43 -0700 Subject: [PATCH 33/47] For MediapipeFace ControlNet preprocessor, if input image is RGBA format then convert to RGB (otherwise MediapipeFace image processing throws an error) --- invokeai/app/invocations/controlnet_image_processors.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/invokeai/app/invocations/controlnet_image_processors.py 
b/invokeai/app/invocations/controlnet_image_processors.py index 870f14dc27..8c354d9908 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -458,6 +458,10 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation, PILInvocationCo # fmt: on def run_processor(self, image): + # MediaPipeFaceDetector throws an error if image has alpha channel + # so convert to RGB if needed + if image.mode == 'RGBA': + image = image.convert('RGB') mediapipe_face_processor = MediapipeFaceDetector() processed_image = mediapipe_face_processor(image, max_faces=self.max_faces, min_confidence=self.min_confidence) return processed_image From e3f136cdda53577229a47972c5250e946e105106 Mon Sep 17 00:00:00 2001 From: sammyf <42468608+sammyf@users.noreply.github.com> Date: Mon, 26 Jun 2023 14:23:10 +0200 Subject: [PATCH 34/47] Update 060_INSTALL_PATCHMATCH.md installing the packaged 'blas' is needed in Archlinux, otherwise patchmatch fails initializing with a "libblas.so.3 missing" error. --- docs/installation/060_INSTALL_PATCHMATCH.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/installation/060_INSTALL_PATCHMATCH.md b/docs/installation/060_INSTALL_PATCHMATCH.md index 6ac6e2814e..ccfd19d207 100644 --- a/docs/installation/060_INSTALL_PATCHMATCH.md +++ b/docs/installation/060_INSTALL_PATCHMATCH.md @@ -87,18 +87,18 @@ Prior to installing PyPatchMatch, you need to take the following steps: sudo pacman -S --needed base-devel ``` -2. Install `opencv`: +2. Install `opencv` and `blas`: ```sh - sudo pacman -S opencv + sudo pacman -S opencv blas ``` or for CUDA support ```sh - sudo pacman -S opencv-cuda + sudo pacman -S opencv-cuda blas ``` - + 3. Fix the naming of the `opencv` package configuration file: ```sh From a2ddb3823b7ffc2d382b6ee722e851e8a73e5998 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Mon, 26 Jun 2023 13:33:38 -0400 Subject: [PATCH 35/47] fix add_model() logic --- invokeai/backend/model_management/model_manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 74b8ac493b..c0d5122886 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -566,7 +566,7 @@ class ModelManager(object): model_config = model_class.create_config(**model_attributes) model_key = self.create_key(model_name, base_model, model_type) - if clobber or model_key not in self.models: + if model_key in self.models and not clobber: raise Exception(f'Attempt to overwrite existing model definition "{model_key}"') old_model = self.models.pop(model_key, None) @@ -706,7 +706,7 @@ class ModelManager(object): if (new_models_found or imported_models) and self.config_path: self.commit() - def autoimport(self): + def autoimport(self)->set[Path]: ''' Scan the autoimport directory (if defined) and import new models, delete defunct models. ''' From 10d2d85c83718ed79c1b933c539b09bfd072fe66 Mon Sep 17 00:00:00 2001 From: user1 Date: Mon, 26 Jun 2023 12:03:05 -0700 Subject: [PATCH 36/47] Started to add ControlNet resize_crop and resize_fill options, but commented out, not ready to deploy yet. 
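Editor's note: since this patch only reserves the mode names (the CONTROLNET_RESIZE_VALUES literal and the resize_mode field stay commented out), the eventual behavior is not defined here. Purely as a hypothetical sketch of what the three names could map to under PIL semantics (resize_for_control and its behavior are assumptions, not code from this repository):

    from PIL import Image, ImageOps

    def resize_for_control(image: Image.Image, width: int, height: int, mode: str = "just_resize") -> Image.Image:
        # Hypothetical interpretation of the reserved mode names; not the project's implementation.
        if mode == "just_resize":
            # stretch to the target size, ignoring aspect ratio
            return image.resize((width, height), Image.LANCZOS)
        if mode == "crop_resize":
            # scale to cover the target box, then center-crop the overflow
            return ImageOps.fit(image, (width, height), Image.LANCZOS)
        if mode == "fill_resize":
            # scale to fit inside the target box, then pad the remainder with black
            return ImageOps.pad(image, (width, height), Image.LANCZOS, color=(0, 0, 0))
        raise ValueError(f"unknown resize mode: {mode}")
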
--- invokeai/app/invocations/controlnet_image_processors.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index 8c354d9908..01deebc9fa 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -101,6 +101,9 @@ CONTROLNET_DEFAULT_MODELS = [ CONTROLNET_NAME_VALUES = Literal[tuple(CONTROLNET_DEFAULT_MODELS)] CONTROLNET_MODE_VALUES = Literal[tuple(["balanced", "more_prompt", "more_control", "unbalanced"])] +# crop and fill options not ready yet +# CONTROLNET_RESIZE_VALUES = Literal[tuple(["just_resize", "crop_resize", "fill_resize"])] + class ControlField(BaseModel): image: ImageField = Field(default=None, description="The control image") @@ -111,7 +114,8 @@ class ControlField(BaseModel): description="When the ControlNet is first applied (% of total steps)") end_step_percent: float = Field(default=1, ge=0, le=1, description="When the ControlNet is last applied (% of total steps)") - control_mode: CONTROLNET_MODE_VALUES = Field(default="balanced", description="The contorl mode to use") + control_mode: CONTROLNET_MODE_VALUES = Field(default="balanced", description="The control mode to use") + # resize_mode: CONTROLNET_RESIZE_VALUES = Field(default="just_resize", description="The resize mode to use") @validator("control_weight") def abs_le_one(cls, v): @@ -186,7 +190,7 @@ class ControlNetInvocation(BaseInvocation): ), ) -# TODO: move image processors to separate file (image_analysis.py + class ImageProcessorInvocation(BaseInvocation, PILInvocationConfig): """Base class for invocations that preprocess images for ControlNet""" From 823e098b7c06c026a1c692c2eb206559dd85a55b Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Mon, 26 Jun 2023 16:18:16 -0400 Subject: [PATCH 37/47] prompt user for prediction type when autoimporting a v2 model without .yaml file don't ask user for prediction type of a config.yaml provided --- invokeai/backend/install/model_install_backend.py | 1 - invokeai/backend/model_management/model_manager.py | 6 +++++- invokeai/backend/model_management/model_probe.py | 3 ++- invokeai/frontend/install/model_install.py | 6 +++--- 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index dcc0eac902..ac25316d9e 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -411,7 +411,6 @@ def update_autoimport_dir(autodir: Path): outfile.write(yaml) tmpfile.replace(invokeai_config_path) - # ------------------------------------- def yes_or_no(prompt: str, default_yes=True): default = "y" if default_yes else "n" diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index c0d5122886..b88550d63b 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -712,8 +712,12 @@ class ModelManager(object): ''' # avoid circular import from invokeai.backend.install.model_install_backend import ModelInstall + from invokeai.frontend.install.model_install import ask_user_for_prediction_type + installer = ModelInstall(config = self.app_config, - model_manager = self) + model_manager = self, + prediction_type_helper = ask_user_for_prediction_type, + ) installed = set() if not 
self.app_config.autoimport_dir: diff --git a/invokeai/backend/model_management/model_probe.py b/invokeai/backend/model_management/model_probe.py index 2b6eb7e7be..42f4bb6225 100644 --- a/invokeai/backend/model_management/model_probe.py +++ b/invokeai/backend/model_management/model_probe.py @@ -255,7 +255,8 @@ class PipelineCheckpointProbe(CheckpointProbeBase): return SchedulerPredictionType.Epsilon elif checkpoint["global_step"] == 110000: return SchedulerPredictionType.VPrediction - if self.checkpoint_path and self.helper: + if self.checkpoint_path and self.helper \ + and not self.checkpoint_path.with_suffix('.yaml').exists(): # if a .yaml config file exists, then this step not needed return self.helper(self.checkpoint_path) else: return None diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index 183be03173..900426eac6 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -578,14 +578,14 @@ class StderrToMessage(): # -------------------------------------------------------- def ask_user_for_prediction_type(model_path: Path, tui_conn: Connection=None - )->Path: + )->SchedulerPredictionType: if tui_conn: logger.debug('Waiting for user response...') return _ask_user_for_pt_tui(model_path, tui_conn) else: return _ask_user_for_pt_cmdline(model_path) -def _ask_user_for_pt_cmdline(model_path): +def _ask_user_for_pt_cmdline(model_path: Path)->SchedulerPredictionType: choices = [SchedulerPredictionType.Epsilon, SchedulerPredictionType.VPrediction, None] print( f""" @@ -608,7 +608,7 @@ Please select the type of the V2 checkpoint named {model_path.name}: return return choice -def _ask_user_for_pt_tui(model_path: Path, tui_conn: Connection)->Path: +def _ask_user_for_pt_tui(model_path: Path, tui_conn: Connection)->SchedulerPredictionType: try: tui_conn.send_bytes(f'*need v2 config for:{model_path}'.encode('utf-8')) # note that we don't do any status checking here From 044fe6bb20710cdbe44e417b55220a7826c67e25 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Mon, 26 Jun 2023 17:48:06 -0400 Subject: [PATCH 38/47] remove dangling debug statement --- invokeai/backend/install/model_install_backend.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index ac25316d9e..58cc52aa11 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -399,9 +399,6 @@ def update_autoimport_dir(autodir: Path): ''' Update the "autoimport_dir" option in invokeai.yaml ''' - with open('log.txt','a') as f: - print(f'autodir = {autodir}',file=f) - invokeai_config_path = config.init_file_path conf = OmegaConf.load(invokeai_config_path) conf.InvokeAI.Paths.autoimport_dir = str(autodir) if autodir else None From f15d28d141912cffdc1d5998931e0e45f0ea0ce5 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Mon, 26 Jun 2023 20:30:08 -0400 Subject: [PATCH 39/47] improved wording of v2 selection prompt --- invokeai/frontend/install/model_install.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index 214b3632f3..980e9b6329 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -590,8 +590,8 @@ def _ask_user_for_pt_cmdline(model_path: Path)->SchedulerPredictionType: print( f""" Please select the type of the V2 
checkpoint named {model_path.name}: -[1] A Stable Diffusion v2.x base model (512 pixels; there should be no 'parameterization:' line in its yaml file) -[2] A Stable Diffusion v2.x v-predictive model (768 pixels; look for a 'parameterization: "v"' line in its yaml file) +[1] A model based on Stable Diffusion v2 trained on 512 pixel images (SD-2-base) +[2] A model based on Stable Diffusion v2 trained on 768 pixel images (SD-2-768) [3] Skip this model and come back later. """ ) From 2e14528e4c2fffa9b23415f0a016bc05bd09090b Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:57:31 +1000 Subject: [PATCH 40/47] feat(nodes): default to CPU noise --- invokeai/app/invocations/latent.py | 77 +------------- invokeai/app/invocations/noise.py | 134 ++++++++++++++++++++++++ invokeai/app/services/default_graphs.py | 3 +- 3 files changed, 137 insertions(+), 77 deletions(-) create mode 100644 invokeai/app/invocations/noise.py diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py index 015ecab211..f42743eb62 100644 --- a/invokeai/app/invocations/latent.py +++ b/invokeai/app/invocations/latent.py @@ -23,7 +23,7 @@ from ...backend.stable_diffusion.diffusers_pipeline import ( from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import \ PostprocessingSettings from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP -from ...backend.util.devices import choose_torch_device, torch_dtype +from ...backend.util.devices import torch_dtype from ...backend.model_management.lora import ModelPatcher from .baseinvocation import (BaseInvocation, BaseInvocationOutput, InvocationConfig, InvocationContext) @@ -59,31 +59,12 @@ def build_latents_output(latents_name: str, latents: torch.Tensor): height=latents.size()[2] * 8, ) -class NoiseOutput(BaseInvocationOutput): - """Invocation noise output""" - #fmt: off - type: Literal["noise_output"] = "noise_output" - - # Inputs - noise: LatentsField = Field(default=None, description="The output noise") - width: int = Field(description="The width of the noise in pixels") - height: int = Field(description="The height of the noise in pixels") - #fmt: on - -def build_noise_output(latents_name: str, latents: torch.Tensor): - return NoiseOutput( - noise=LatentsField(latents_name=latents_name), - width=latents.size()[3] * 8, - height=latents.size()[2] * 8, - ) - SAMPLER_NAME_VALUES = Literal[ tuple(list(SCHEDULER_MAP.keys())) ] - def get_scheduler( context: InvocationContext, scheduler_info: ModelInfo, @@ -105,62 +86,6 @@ def get_scheduler( return scheduler -def get_noise(width:int, height:int, device:torch.device, seed:int = 0, latent_channels:int=4, use_mps_noise:bool=False, downsampling_factor:int = 8): - # limit noise to only the diffusion image channels, not the mask channels - input_channels = min(latent_channels, 4) - use_device = "cpu" if (use_mps_noise or device.type == "mps") else device - generator = torch.Generator(device=use_device).manual_seed(seed) - x = torch.randn( - [ - 1, - input_channels, - height // downsampling_factor, - width // downsampling_factor, - ], - dtype=torch_dtype(device), - device=use_device, - generator=generator, - ).to(device) - # if self.perlin > 0.0: - # perlin_noise = self.get_perlin_noise( - # width // self.downsampling_factor, height // self.downsampling_factor - # ) - # x = (1 - self.perlin) * x + self.perlin * perlin_noise - return x - -class NoiseInvocation(BaseInvocation): - """Generates latent noise.""" - - type: 
Literal["noise"] = "noise" - - # Inputs - seed: int = Field(ge=0, le=SEED_MAX, description="The seed to use", default_factory=get_random_seed) - width: int = Field(default=512, multiple_of=8, gt=0, description="The width of the resulting noise", ) - height: int = Field(default=512, multiple_of=8, gt=0, description="The height of the resulting noise", ) - - - # Schema customisation - class Config(InvocationConfig): - schema_extra = { - "ui": { - "tags": ["latents", "noise"], - }, - } - - @validator("seed", pre=True) - def modulo_seed(cls, v): - """Returns the seed modulo SEED_MAX to ensure it is within the valid range.""" - return v % SEED_MAX - - def invoke(self, context: InvocationContext) -> NoiseOutput: - device = torch.device(choose_torch_device()) - noise = get_noise(self.width, self.height, device, self.seed) - - name = f'{context.graph_execution_state_id}__{self.id}' - context.services.latents.save(name, noise) - return build_noise_output(latents_name=name, latents=noise) - - # Text to image class TextToLatentsInvocation(BaseInvocation): """Generates latents from conditionings.""" diff --git a/invokeai/app/invocations/noise.py b/invokeai/app/invocations/noise.py new file mode 100644 index 0000000000..c5866f3608 --- /dev/null +++ b/invokeai/app/invocations/noise.py @@ -0,0 +1,134 @@ +# Copyright (c) 2023 Kyle Schouviller (https://github.com/kyle0654) & the InvokeAI Team + +import math +from typing import Literal + +from pydantic import Field, validator +import torch +from invokeai.app.invocations.latent import LatentsField + +from invokeai.app.util.misc import SEED_MAX, get_random_seed +from ...backend.util.devices import choose_torch_device, torch_dtype +from .baseinvocation import ( + BaseInvocation, + BaseInvocationOutput, + InvocationConfig, + InvocationContext, +) + +""" +Utilities +""" + + +def get_noise( + width: int, + height: int, + device: torch.device, + seed: int = 0, + latent_channels: int = 4, + downsampling_factor: int = 8, + use_cpu: bool = True, + perlin: float = 0.0, +): + """Generate noise for a given image size.""" + noise_device_type = "cpu" if (use_cpu or device.type == "mps") else device.type + + # limit noise to only the diffusion image channels, not the mask channels + input_channels = min(latent_channels, 4) + generator = torch.Generator(device=noise_device_type).manual_seed(seed) + + noise_tensor = torch.randn( + [ + 1, + input_channels, + height // downsampling_factor, + width // downsampling_factor, + ], + dtype=torch_dtype(device), + device=noise_device_type, + generator=generator, + ).to(device) + + return noise_tensor + + +""" +Nodes +""" + + +class NoiseOutput(BaseInvocationOutput): + """Invocation noise output""" + + # fmt: off + type: Literal["noise_output"] = "noise_output" + + # Inputs + noise: LatentsField = Field(default=None, description="The output noise") + width: int = Field(description="The width of the noise in pixels") + height: int = Field(description="The height of the noise in pixels") + # fmt: on + + +def build_noise_output(latents_name: str, latents: torch.Tensor): + return NoiseOutput( + noise=LatentsField(latents_name=latents_name), + width=latents.size()[3] * 8, + height=latents.size()[2] * 8, + ) + + +class NoiseInvocation(BaseInvocation): + """Generates latent noise.""" + + type: Literal["noise"] = "noise" + + # Inputs + seed: int = Field( + ge=0, + le=SEED_MAX, + description="The seed to use", + default_factory=get_random_seed, + ) + width: int = Field( + default=512, + multiple_of=8, + gt=0, + description="The width of the 
resulting noise", + ) + height: int = Field( + default=512, + multiple_of=8, + gt=0, + description="The height of the resulting noise", + ) + use_cpu: bool = Field( + default=True, + description="Use CPU for noise generation (for reproducible results across platforms)", + ) + + # Schema customisation + class Config(InvocationConfig): + schema_extra = { + "ui": { + "tags": ["latents", "noise"], + }, + } + + @validator("seed", pre=True) + def modulo_seed(cls, v): + """Returns the seed modulo SEED_MAX to ensure it is within the valid range.""" + return v % SEED_MAX + + def invoke(self, context: InvocationContext) -> NoiseOutput: + noise = get_noise( + width=self.width, + height=self.height, + device=choose_torch_device(), + seed=self.seed, + use_cpu=self.use_cpu, + ) + name = f"{context.graph_execution_state_id}__{self.id}" + context.services.latents.save(name, noise) + return build_noise_output(latents_name=name, latents=noise) diff --git a/invokeai/app/services/default_graphs.py b/invokeai/app/services/default_graphs.py index 5eda5e957d..92263751b7 100644 --- a/invokeai/app/services/default_graphs.py +++ b/invokeai/app/services/default_graphs.py @@ -1,4 +1,5 @@ -from ..invocations.latent import LatentsToImageInvocation, NoiseInvocation, TextToLatentsInvocation +from ..invocations.latent import LatentsToImageInvocation, TextToLatentsInvocation +from ..invocations.noise import NoiseInvocation from ..invocations.compel import CompelInvocation from ..invocations.params import ParamIntInvocation from .graph import Edge, EdgeConnection, ExposedNodeInput, ExposedNodeOutput, Graph, LibraryGraph From 246298d1d6b4cbb4e58bc14552873d4d2df0f0b9 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:57:41 +1000 Subject: [PATCH 41/47] chore(ui): regen types --- .../frontend/web/src/services/api/schema.d.ts | 34 +++++++++++++------ 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/invokeai/frontend/web/src/services/api/schema.d.ts b/invokeai/frontend/web/src/services/api/schema.d.ts index 164de579bb..767fe7b2b3 100644 --- a/invokeai/frontend/web/src/services/api/schema.d.ts +++ b/invokeai/frontend/web/src/services/api/schema.d.ts @@ -1030,7 +1030,7 @@ export type components = { * @description The nodes in this graph */ nodes?: { - [key: string]: (components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["PipelineModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | 
components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["UpscaleInvocation"] | components["schemas"]["RestoreFaceInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"]) | undefined; + [key: string]: (components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["PipelineModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | 
components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["UpscaleInvocation"] | components["schemas"]["RestoreFaceInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"]) | undefined; }; /** * Edges @@ -1073,7 +1073,7 @@ export type components = { * @description The results of node executions */ results: { - [key: string]: (components["schemas"]["ImageOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["PromptOutput"] | components["schemas"]["PromptCollectionOutput"] | components["schemas"]["CompelOutput"] | components["schemas"]["IntOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["IntCollectionOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"]) | undefined; + [key: string]: (components["schemas"]["ImageOutput"] | components["schemas"]["MaskOutput"] | components["schemas"]["ControlOutput"] | components["schemas"]["ModelLoaderOutput"] | components["schemas"]["LoraLoaderOutput"] | components["schemas"]["PromptOutput"] | components["schemas"]["PromptCollectionOutput"] | components["schemas"]["CompelOutput"] | components["schemas"]["IntOutput"] | components["schemas"]["FloatOutput"] | components["schemas"]["LatentsOutput"] | components["schemas"]["IntCollectionOutput"] | components["schemas"]["FloatCollectionOutput"] | components["schemas"]["NoiseOutput"] | components["schemas"]["GraphInvocationOutput"] | components["schemas"]["IterateInvocationOutput"] | components["schemas"]["CollectInvocationOutput"]) | undefined; }; /** * Errors @@ -2917,7 +2917,7 @@ export type components = { /** ModelsList */ ModelsList: { /** Models */ - models: (components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelConfig"] | components["schemas"]["TextualInversionModelConfig"] 
| components["schemas"]["StableDiffusion2ModelDiffusersConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"])[]; + models: (components["schemas"]["StableDiffusion1ModelCheckpointConfig"] | components["schemas"]["StableDiffusion1ModelDiffusersConfig"] | components["schemas"]["VaeModelConfig"] | components["schemas"]["LoRAModelConfig"] | components["schemas"]["ControlNetModelConfig"] | components["schemas"]["TextualInversionModelConfig"] | components["schemas"]["StableDiffusion2ModelCheckpointConfig"] | components["schemas"]["StableDiffusion2ModelDiffusersConfig"])[]; }; /** * MultiplyInvocation @@ -2993,6 +2993,18 @@ export type components = { * @default 512 */ height?: number; + /** + * Perlin + * @description The amount of perlin noise to add to the noise + * @default 0 + */ + perlin?: number; + /** + * Use Cpu + * @description Use CPU for noise generation (for reproducible results across platforms) + * @default true + */ + use_cpu?: boolean; }; /** * NoiseOutput @@ -4177,18 +4189,18 @@ export type components = { */ image?: components["schemas"]["ImageField"]; }; - /** - * StableDiffusion1ModelFormat - * @description An enumeration. - * @enum {string} - */ - StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; /** * StableDiffusion2ModelFormat * @description An enumeration. * @enum {string} */ StableDiffusion2ModelFormat: "checkpoint" | "diffusers"; + /** + * StableDiffusion1ModelFormat + * @description An enumeration. + * @enum {string} + */ + StableDiffusion1ModelFormat: "checkpoint" | "diffusers"; }; responses: never; parameters: never; @@ -4299,7 +4311,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["PipelineModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | 
components["schemas"]["UpscaleInvocation"] | components["schemas"]["RestoreFaceInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"]; + "application/json": components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["PipelineModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["UpscaleInvocation"] | components["schemas"]["RestoreFaceInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | 
components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"]; }; }; responses: { @@ -4336,7 +4348,7 @@ export type operations = { }; requestBody: { content: { - "application/json": components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["PipelineModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["UpscaleInvocation"] | components["schemas"]["RestoreFaceInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | 
components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"]; + "application/json": components["schemas"]["LoadImageInvocation"] | components["schemas"]["ShowImageInvocation"] | components["schemas"]["ImageCropInvocation"] | components["schemas"]["ImagePasteInvocation"] | components["schemas"]["MaskFromAlphaInvocation"] | components["schemas"]["ImageMultiplyInvocation"] | components["schemas"]["ImageChannelInvocation"] | components["schemas"]["ImageConvertInvocation"] | components["schemas"]["ImageBlurInvocation"] | components["schemas"]["ImageResizeInvocation"] | components["schemas"]["ImageScaleInvocation"] | components["schemas"]["ImageLerpInvocation"] | components["schemas"]["ImageInverseLerpInvocation"] | components["schemas"]["ControlNetInvocation"] | components["schemas"]["ImageProcessorInvocation"] | components["schemas"]["PipelineModelLoaderInvocation"] | components["schemas"]["LoraLoaderInvocation"] | components["schemas"]["DynamicPromptInvocation"] | components["schemas"]["CompelInvocation"] | components["schemas"]["AddInvocation"] | components["schemas"]["SubtractInvocation"] | components["schemas"]["MultiplyInvocation"] | components["schemas"]["DivideInvocation"] | components["schemas"]["RandomIntInvocation"] | components["schemas"]["ParamIntInvocation"] | components["schemas"]["ParamFloatInvocation"] | components["schemas"]["TextToLatentsInvocation"] | components["schemas"]["LatentsToImageInvocation"] | components["schemas"]["ResizeLatentsInvocation"] | components["schemas"]["ScaleLatentsInvocation"] | components["schemas"]["ImageToLatentsInvocation"] | components["schemas"]["CvInpaintInvocation"] | components["schemas"]["RangeInvocation"] | components["schemas"]["RangeOfSizeInvocation"] | components["schemas"]["RandomRangeInvocation"] | components["schemas"]["FloatLinearRangeInvocation"] | components["schemas"]["StepParamEasingInvocation"] | components["schemas"]["NoiseInvocation"] | components["schemas"]["UpscaleInvocation"] | components["schemas"]["RestoreFaceInvocation"] | components["schemas"]["InpaintInvocation"] | components["schemas"]["InfillColorInvocation"] | components["schemas"]["InfillTileInvocation"] | components["schemas"]["InfillPatchMatchInvocation"] | components["schemas"]["GraphInvocation"] | components["schemas"]["IterateInvocation"] | components["schemas"]["CollectInvocation"] | components["schemas"]["CannyImageProcessorInvocation"] | components["schemas"]["HedImageProcessorInvocation"] | components["schemas"]["LineartImageProcessorInvocation"] | components["schemas"]["LineartAnimeImageProcessorInvocation"] | components["schemas"]["OpenposeImageProcessorInvocation"] | components["schemas"]["MidasDepthImageProcessorInvocation"] | components["schemas"]["NormalbaeImageProcessorInvocation"] | components["schemas"]["MlsdImageProcessorInvocation"] | components["schemas"]["PidiImageProcessorInvocation"] | components["schemas"]["ContentShuffleImageProcessorInvocation"] | components["schemas"]["ZoeDepthImageProcessorInvocation"] | components["schemas"]["MediapipeFaceProcessorInvocation"] | components["schemas"]["LatentsToLatentsInvocation"]; }; }; responses: { From 642db657c2b52d51eefb6918a1034625d6f059e9 Mon Sep 17 00:00:00 2001 From: psychedelicious 
<4822129+psychedelicious@users.noreply.github.com> Date: Tue, 27 Jun 2023 20:29:41 +1000 Subject: [PATCH 42/47] feat(ui): use max prompts for combinatorial, iterations for non-combinatorial --- .../components/ParamDynamicPromptsCollapse.tsx | 2 +- .../components/ParamDynamicPromptsMaxPrompts.tsx | 8 +++++--- .../nodes/util/graphBuilders/addDynamicPromptsToGraph.ts | 2 +- .../components/Parameters/Core/ParamIterations.tsx | 3 ++- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCollapse.tsx b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCollapse.tsx index eeaf1b81ec..1aefecf3e6 100644 --- a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCollapse.tsx +++ b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsCollapse.tsx @@ -35,8 +35,8 @@ const ParamDynamicPromptsCollapse = () => { withSwitch > - + ); diff --git a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsMaxPrompts.tsx b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsMaxPrompts.tsx index ab56abaa35..19f02ae3e5 100644 --- a/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsMaxPrompts.tsx +++ b/invokeai/frontend/web/src/features/dynamicPrompts/components/ParamDynamicPromptsMaxPrompts.tsx @@ -9,17 +9,18 @@ import { stateSelector } from 'app/store/store'; const selector = createSelector( stateSelector, (state) => { - const { maxPrompts } = state.dynamicPrompts; + const { maxPrompts, combinatorial } = state.dynamicPrompts; const { min, sliderMax, inputMax } = state.config.sd.dynamicPrompts.maxPrompts; - return { maxPrompts, min, sliderMax, inputMax }; + return { maxPrompts, min, sliderMax, inputMax, combinatorial }; }, defaultSelectorOptions ); const ParamDynamicPromptsMaxPrompts = () => { - const { maxPrompts, min, sliderMax, inputMax } = useAppSelector(selector); + const { maxPrompts, min, sliderMax, inputMax, combinatorial } = + useAppSelector(selector); const dispatch = useAppDispatch(); const handleChange = useCallback( @@ -36,6 +37,7 @@ const ParamDynamicPromptsMaxPrompts = () => { return ( { state.config.sd.iterations; const { iterations } = state.generation; const { shouldUseSliders } = state.ui; - const isDisabled = state.dynamicPrompts.isEnabled; + const isDisabled = + state.dynamicPrompts.isEnabled && state.dynamicPrompts.combinatorial; const step = state.hotkeys.shift ? fineStep : coarseStep; From 1f3e5582f468ded5e5c539355c6fd921191149b7 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Tue, 27 Jun 2023 11:23:21 +1000 Subject: [PATCH 43/47] feat(ui): add type extraction helpers --- .../frontend/web/src/services/api/types.d.ts | 122 +++++++++--------- 1 file changed, 60 insertions(+), 62 deletions(-) diff --git a/invokeai/frontend/web/src/services/api/types.d.ts b/invokeai/frontend/web/src/services/api/types.d.ts index a995d9c298..2a2f90f434 100644 --- a/invokeai/frontend/web/src/services/api/types.d.ts +++ b/invokeai/frontend/web/src/services/api/types.d.ts @@ -4,91 +4,89 @@ import { components } from './schema'; type schemas = components['schemas']; /** - * Helper type to extract the invocation type from the schema. - * Also flags the `type` property as required. + * Extracts the schema type from the schema. 
*/ -type Invocation = O.Required; +type S = components['schemas'][T]; /** - * Types from the API, re-exported from the types generated by `openapi-typescript`. + * Extracts the node type from the schema. + * Also flags the `type` property as required. */ +type N = O.Required< + components['schemas'][T], + 'type' +>; // Images -export type ImageDTO = schemas['ImageDTO']; -export type BoardDTO = schemas['BoardDTO']; -export type BoardChanges = schemas['BoardChanges']; -export type ImageChanges = schemas['ImageRecordChanges']; -export type ImageCategory = schemas['ImageCategory']; -export type ResourceOrigin = schemas['ResourceOrigin']; -export type ImageField = schemas['ImageField']; +export type ImageDTO = S<'ImageDTO'>; +export type BoardDTO = S<'BoardDTO'>; +export type BoardChanges = S<'BoardChanges'>; +export type ImageChanges = S<'ImageRecordChanges'>; +export type ImageCategory = S<'ImageCategory'>; +export type ResourceOrigin = S<'ResourceOrigin'>; +export type ImageField = S<'ImageField'>; export type OffsetPaginatedResults_BoardDTO_ = - schemas['OffsetPaginatedResults_BoardDTO_']; + S<'OffsetPaginatedResults_BoardDTO_'>; export type OffsetPaginatedResults_ImageDTO_ = - schemas['OffsetPaginatedResults_ImageDTO_']; + S<'OffsetPaginatedResults_ImageDTO_'>; // Models -export type ModelType = schemas['ModelType']; -export type BaseModelType = schemas['BaseModelType']; -export type PipelineModelField = schemas['PipelineModelField']; -export type ModelsList = schemas['ModelsList']; +export type ModelType = S<'ModelType'>; +export type BaseModelType = S<'BaseModelType'>; +export type PipelineModelField = S<'PipelineModelField'>; +export type ModelsList = S<'ModelsList'>; // Graphs -export type Graph = schemas['Graph']; -export type Edge = schemas['Edge']; -export type GraphExecutionState = schemas['GraphExecutionState']; +export type Graph = S<'Graph'>; +export type Edge = S<'Edge'>; +export type GraphExecutionState = S<'GraphExecutionState'>; // General nodes -export type CollectInvocation = Invocation<'CollectInvocation'>; -export type IterateInvocation = Invocation<'IterateInvocation'>; -export type RangeInvocation = Invocation<'RangeInvocation'>; -export type RandomRangeInvocation = Invocation<'RandomRangeInvocation'>; -export type RangeOfSizeInvocation = Invocation<'RangeOfSizeInvocation'>; -export type InpaintInvocation = Invocation<'InpaintInvocation'>; -export type ImageResizeInvocation = Invocation<'ImageResizeInvocation'>; -export type RandomIntInvocation = Invocation<'RandomIntInvocation'>; -export type CompelInvocation = Invocation<'CompelInvocation'>; -export type DynamicPromptInvocation = Invocation<'DynamicPromptInvocation'>; -export type NoiseInvocation = Invocation<'NoiseInvocation'>; -export type TextToLatentsInvocation = Invocation<'TextToLatentsInvocation'>; -export type LatentsToLatentsInvocation = - Invocation<'LatentsToLatentsInvocation'>; -export type ImageToLatentsInvocation = Invocation<'ImageToLatentsInvocation'>; -export type LatentsToImageInvocation = Invocation<'LatentsToImageInvocation'>; -export type PipelineModelLoaderInvocation = - Invocation<'PipelineModelLoaderInvocation'>; +export type CollectInvocation = N<'CollectInvocation'>; +export type IterateInvocation = N<'IterateInvocation'>; +export type RangeInvocation = N<'RangeInvocation'>; +export type RandomRangeInvocation = N<'RandomRangeInvocation'>; +export type RangeOfSizeInvocation = N<'RangeOfSizeInvocation'>; +export type InpaintInvocation = N<'InpaintInvocation'>; +export type 
ImageResizeInvocation = N<'ImageResizeInvocation'>; +export type RandomIntInvocation = N<'RandomIntInvocation'>; +export type CompelInvocation = N<'CompelInvocation'>; +export type DynamicPromptInvocation = N<'DynamicPromptInvocation'>; +export type NoiseInvocation = N<'NoiseInvocation'>; +export type TextToLatentsInvocation = N<'TextToLatentsInvocation'>; +export type LatentsToLatentsInvocation = N<'LatentsToLatentsInvocation'>; +export type ImageToLatentsInvocation = N<'ImageToLatentsInvocation'>; +export type LatentsToImageInvocation = N<'LatentsToImageInvocation'>; +export type PipelineModelLoaderInvocation = N<'PipelineModelLoaderInvocation'>; // ControlNet Nodes -export type ControlNetInvocation = Invocation<'ControlNetInvocation'>; -export type CannyImageProcessorInvocation = - Invocation<'CannyImageProcessorInvocation'>; +export type ControlNetInvocation = N<'ControlNetInvocation'>; +export type CannyImageProcessorInvocation = N<'CannyImageProcessorInvocation'>; export type ContentShuffleImageProcessorInvocation = - Invocation<'ContentShuffleImageProcessorInvocation'>; -export type HedImageProcessorInvocation = - Invocation<'HedImageProcessorInvocation'>; + N<'ContentShuffleImageProcessorInvocation'>; +export type HedImageProcessorInvocation = N<'HedImageProcessorInvocation'>; export type LineartAnimeImageProcessorInvocation = - Invocation<'LineartAnimeImageProcessorInvocation'>; + N<'LineartAnimeImageProcessorInvocation'>; export type LineartImageProcessorInvocation = - Invocation<'LineartImageProcessorInvocation'>; + N<'LineartImageProcessorInvocation'>; export type MediapipeFaceProcessorInvocation = - Invocation<'MediapipeFaceProcessorInvocation'>; + N<'MediapipeFaceProcessorInvocation'>; export type MidasDepthImageProcessorInvocation = - Invocation<'MidasDepthImageProcessorInvocation'>; -export type MlsdImageProcessorInvocation = - Invocation<'MlsdImageProcessorInvocation'>; + N<'MidasDepthImageProcessorInvocation'>; +export type MlsdImageProcessorInvocation = N<'MlsdImageProcessorInvocation'>; export type NormalbaeImageProcessorInvocation = - Invocation<'NormalbaeImageProcessorInvocation'>; + N<'NormalbaeImageProcessorInvocation'>; export type OpenposeImageProcessorInvocation = - Invocation<'OpenposeImageProcessorInvocation'>; -export type PidiImageProcessorInvocation = - Invocation<'PidiImageProcessorInvocation'>; + N<'OpenposeImageProcessorInvocation'>; +export type PidiImageProcessorInvocation = N<'PidiImageProcessorInvocation'>; export type ZoeDepthImageProcessorInvocation = - Invocation<'ZoeDepthImageProcessorInvocation'>; + N<'ZoeDepthImageProcessorInvocation'>; // Node Outputs -export type ImageOutput = schemas['ImageOutput']; -export type MaskOutput = schemas['MaskOutput']; -export type PromptOutput = schemas['PromptOutput']; -export type IterateInvocationOutput = schemas['IterateInvocationOutput']; -export type CollectInvocationOutput = schemas['CollectInvocationOutput']; -export type LatentsOutput = schemas['LatentsOutput']; -export type GraphInvocationOutput = schemas['GraphInvocationOutput']; +export type ImageOutput = S<'ImageOutput'>; +export type MaskOutput = S<'MaskOutput'>; +export type PromptOutput = S<'PromptOutput'>; +export type IterateInvocationOutput = S<'IterateInvocationOutput'>; +export type CollectInvocationOutput = S<'CollectInvocationOutput'>; +export type LatentsOutput = S<'LatentsOutput'>; +export type GraphInvocationOutput = S<'GraphInvocationOutput'>; From e8ed0fad6c1148e4ccdb8824c6a8cfa237d89e90 Mon Sep 17 00:00:00 2001 From: Lincoln Stein 
Date: Tue, 27 Jun 2023 12:30:53 -0400 Subject: [PATCH 44/47] autoimport from embedding/controlnet/lora folders designated in startup file --- invokeai/app/services/config.py | 6 +- .../backend/install/invokeai_configure.py | 58 +++++++++---- .../backend/install/model_install_backend.py | 39 ++------- .../backend/model_management/model_manager.py | 83 ++++++++++++------- .../backend/model_management/model_probe.py | 37 +++++++-- .../backend/model_management/models/base.py | 2 +- invokeai/frontend/install/model_install.py | 70 ++++++++-------- 7 files changed, 172 insertions(+), 123 deletions(-) diff --git a/invokeai/app/services/config.py b/invokeai/app/services/config.py index 232cbe7932..e0f1ceeb25 100644 --- a/invokeai/app/services/config.py +++ b/invokeai/app/services/config.py @@ -374,8 +374,10 @@ setting environment variables INVOKEAI_. tiled_decode : bool = Field(default=False, description="Whether to enable tiled VAE decode (reduces memory consumption with some performance penalty)", category='Memory/Performance') root : Path = Field(default=_find_root(), description='InvokeAI runtime root directory', category='Paths') - autoimport_dir : Path = Field(default='autoimport', description='Path to a directory of models files to be imported on startup.', category='Paths') - autoconvert_dir : Path = Field(default=None, description='Deprecated configuration option.', category='Paths') + autoimport_dir : Path = Field(default='autoimport/main', description='Path to a directory of models files to be imported on startup.', category='Paths') + lora_dir : Path = Field(default='autoimport/lora', description='Path to a directory of LoRA/LyCORIS models to be imported on startup.', category='Paths') + embedding_dir : Path = Field(default='autoimport/embedding', description='Path to a directory of Textual Inversion embeddings to be imported on startup.', category='Paths') + controlnet_dir : Path = Field(default='autoimport/controlnet', description='Path to a directory of ControlNet embeddings to be imported on startup.', category='Paths') conf_path : Path = Field(default='configs/models.yaml', description='Path to models definition file', category='Paths') models_dir : Path = Field(default='models', description='Path to the models directory', category='Paths') legacy_conf_dir : Path = Field(default='configs/stable-diffusion', description='Path to directory of legacy checkpoint config files', category='Paths') diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py index b8c171f526..5b713516be 100755 --- a/invokeai/backend/install/invokeai_configure.py +++ b/invokeai/backend/install/invokeai_configure.py @@ -442,6 +442,26 @@ to allow InvokeAI to download restricted styles & subjects from the "Concept Lib scroll_exit=True, ) self.nextrely += 1 + self.add_widget_intelligent( + npyscreen.FixedText, + value="Directories containing textual inversion, controlnet and LoRA models ( autocompletes, ctrl-N advances):", + editable=False, + color="CONTROL", + ) + self.autoimport_dirs = {} + for description, config_name, path in autoimport_paths(old_opts): + self.autoimport_dirs[config_name] = self.add_widget_intelligent( + npyscreen.TitleFilename, + name=description+':', + value=str(path), + select_dir=True, + must_exist=False, + use_two_lines=False, + labelColor="GOOD", + begin_entry_at=32, + scroll_exit=True + ) + self.nextrely += 1 self.add_widget_intelligent( npyscreen.TitleFixedText, name="== LICENSE ==", @@ -505,10 +525,6 @@ 
https://huggingface.co/spaces/CompVis/stable-diffusion-license bad_fields.append( f"The output directory does not seem to be valid. Please check that {str(Path(opt.outdir).parent)} is an existing directory." ) - # if not Path(opt.embedding_dir).parent.exists(): - # bad_fields.append( - # f"The embedding directory does not seem to be valid. Please check that {str(Path(opt.embedding_dir).parent)} is an existing directory." - # ) if len(bad_fields) > 0: message = "The following problems were detected and must be corrected:\n" for problem in bad_fields: @@ -528,12 +544,15 @@ https://huggingface.co/spaces/CompVis/stable-diffusion-license "max_loaded_models", "xformers_enabled", "always_use_cpu", -# "embedding_dir", -# "lora_dir", -# "controlnet_dir", ]: setattr(new_opts, attr, getattr(self, attr).value) + for attr in self.autoimport_dirs: + directory = Path(self.autoimport_dirs[attr].value) + if directory.is_relative_to(config.root_path): + directory = directory.relative_to(config.root_path) + setattr(new_opts, attr, directory) + new_opts.hf_token = self.hf_token.value new_opts.license_acceptance = self.license_acceptance.value new_opts.precision = PRECISION_CHOICES[self.precision.value[0]] @@ -595,22 +614,32 @@ def default_user_selections(program_opts: Namespace) -> InstallSelections: else [models[x].path or models[x].repo_id for x in installer.recommended_models()] if program_opts.yes_to_all else list(), - scan_directory=None, - autoscan_on_startup=None, +# scan_directory=None, +# autoscan_on_startup=None, ) +# ------------------------------------- +def autoimport_paths(config: InvokeAIAppConfig): + return [ + ('Checkpoints & diffusers models', 'autoimport_dir', config.root_path / config.autoimport_dir), + ('LoRA/LyCORIS models', 'lora_dir', config.root_path / config.lora_dir), + ('Controlnet models', 'controlnet_dir', config.root_path / config.controlnet_dir), + ('Textual Inversion Embeddings', 'embedding_dir', config.root_path / config.embedding_dir), + ] + # ------------------------------------- def initialize_rootdir(root: Path, yes_to_all: bool = False): logger.info("** INITIALIZING INVOKEAI RUNTIME DIRECTORY **") for name in ( "models", "databases", - "autoimport", "text-inversion-output", "text-inversion-training-data", "configs" ): os.makedirs(os.path.join(root, name), exist_ok=True) + for model_type in ModelType: + Path(root, 'autoimport', model_type.value).mkdir(parents=True, exist_ok=True) configs_src = Path(configs.__path__[0]) configs_dest = root / "configs" @@ -618,9 +647,8 @@ def initialize_rootdir(root: Path, yes_to_all: bool = False): shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True) dest = root / 'models' - for model_base in [BaseModelType.StableDiffusion1,BaseModelType.StableDiffusion2]: - for model_type in [ModelType.Main, ModelType.Vae, ModelType.Lora, - ModelType.ControlNet,ModelType.TextualInversion]: + for model_base in BaseModelType: + for model_type in ModelType: path = dest / model_base.value / model_type.value path.mkdir(parents=True, exist_ok=True) path = dest / 'core' @@ -632,9 +660,7 @@ def initialize_rootdir(root: Path, yes_to_all: bool = False): } ) ) -# with open(root / 'invokeai.yaml','w') as f: -# f.write('#empty invokeai.yaml initialization file') - + # ------------------------------------- def run_console_ui( program_opts: Namespace, initfile: Path = None diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index 58cc52aa11..f6cde2c90f 100644 --- 
a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -70,8 +70,8 @@ class ModelInstallList: class InstallSelections(): install_models: List[str]= field(default_factory=list) remove_models: List[str]=field(default_factory=list) - scan_directory: Path = None - autoscan_on_startup: bool=False +# scan_directory: Path = None +# autoscan_on_startup: bool=False @dataclass class ModelLoadInfo(): @@ -155,8 +155,8 @@ class ModelInstall(object): def install(self, selections: InstallSelections): job = 1 jobs = len(selections.remove_models) + len(selections.install_models) - if selections.scan_directory: - jobs += 1 +# if selections.scan_directory: +# jobs += 1 # remove requested models for key in selections.remove_models: @@ -171,18 +171,8 @@ class ModelInstall(object): self.heuristic_install(path) job += 1 - # import from the scan directory, if any - if path := selections.scan_directory: - logger.info(f'Scanning and importing models from directory {path} [{job}/{jobs}]') - self.heuristic_install(path) - self.mgr.commit() - if selections.autoscan_on_startup and Path(selections.scan_directory).is_dir(): - update_autoimport_dir(selections.scan_directory) - else: - update_autoimport_dir(None) - def heuristic_install(self, model_path_id_or_url: Union[str,Path], models_installed: Set[Path]=None)->Set[Path]: @@ -237,7 +227,7 @@ class ModelInstall(object): self.mgr.add_model(model_name = model_name, base_model = info.base_type, model_type = info.model_type, - model_attributes = attributes + model_attributes = attributes, ) except Exception as e: logger.warning(f'{str(e)} Skipping registration.') @@ -309,11 +299,11 @@ class ModelInstall(object): return location.stem def _make_attributes(self, path: Path, info: ModelProbeInfo)->dict: - # convoluted way to retrieve the description from datasets - description = f'{info.base_type.value} {info.model_type.value} model' + model_name = path.name if path.is_dir() else path.stem + description = f'{info.base_type.value} {info.model_type.value} model {model_name}' if key := self.reverse_paths.get(self.current_id): if key in self.datasets: - description = self.datasets[key]['description'] + description = self.datasets[key].get('description') or description rel_path = self.relative_to_root(path) @@ -395,19 +385,6 @@ class ModelInstall(object): ''' return {v.get('path') or v.get('repo_id') : k for k, v in datasets.items()} -def update_autoimport_dir(autodir: Path): - ''' - Update the "autoimport_dir" option in invokeai.yaml - ''' - invokeai_config_path = config.init_file_path - conf = OmegaConf.load(invokeai_config_path) - conf.InvokeAI.Paths.autoimport_dir = str(autodir) if autodir else None - yaml = OmegaConf.to_yaml(conf) - tmpfile = invokeai_config_path.parent / "new_config.tmp" - with open(tmpfile, "w", encoding="utf-8") as outfile: - outfile.write(yaml) - tmpfile.replace(invokeai_config_path) - # ------------------------------------- def yes_or_no(prompt: str, default_yes=True): default = "y" if default_yes else "n" diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index b88550d63b..292b706176 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -168,11 +168,27 @@ structure at initialization time by scanning the models directory. The in-memory data structure can be resynchronized by calling `manager.scan_models_directory()`. 
-Files and folders placed inside the `autoimport_dir` (path defined in -`invokeai.yaml`, defaulting to `ROOTDIR/autoimport` will also be -scanned for new models at initialization time and added to -`models.yaml`. Files will not be moved from this location but -preserved in-place. +Files and folders placed inside the `autoimport` paths (paths +defined in `invokeai.yaml`) will also be scanned for new models at +initialization time and added to `models.yaml`. Files will not be +moved from this location but preserved in-place. These directories +are: + + configuration default description + ------------- ------- ----------- + autoimport_dir autoimport/main main models + lora_dir autoimport/lora LoRA/LyCORIS models + embedding_dir autoimport/embedding TI embeddings + controlnet_dir autoimport/controlnet ControlNet models + +In actuality, models located in any of these directories are scanned +to determine their type, so it isn't strictly necessary to organize +the different types in this way. This entry in `invokeai.yaml` will +recursively scan all subdirectories within `autoimport`, scan models +files it finds, and import them if recognized. + + Paths: + autoimport_dir: autoimport A model can be manually added using `add_model()` using the model's name, base model, type and a dict of model attributes. See @@ -208,6 +224,7 @@ checkpoint or safetensors file. The path points to a file or directory on disk. If a relative path, the root is the InvokeAI ROOTDIR. + """ from __future__ import annotations @@ -660,7 +677,7 @@ class ModelManager(object): ): loaded_files = set() new_models_found = False - + with Chdir(self.app_config.root_path): for model_key, model_config in list(self.models.items()): model_name, cur_base_model, cur_model_type = self.parse_key(model_key) @@ -720,30 +737,38 @@ class ModelManager(object): ) installed = set() - if not self.app_config.autoimport_dir: - return installed - - autodir = self.app_config.root_path / self.app_config.autoimport_dir - if not (autodir and autodir.exists()): - return installed - - known_paths = {(self.app_config.root_path / x['path']).resolve() for x in self.list_models()} + + config = self.app_config + known_paths = {(self.app_config.root_path / x['path']) for x in self.list_models()} scanned_dirs = set() - for root, dirs, files in os.walk(autodir): - for d in dirs: - path = Path(root) / d - if path in known_paths: - continue - if any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]): - installed.update(installer.heuristic_install(path)) - scanned_dirs.add(path) - - for f in files: - path = Path(root) / f - if path in known_paths or path.parent in scanned_dirs: - continue - if path.suffix in {'.ckpt','.bin','.pth','.safetensors'}: - installed.update(installer.heuristic_install(path)) + + for autodir in [config.autoimport_dir, + config.lora_dir, + config.embedding_dir, + config.controlnet_dir]: + if autodir is None: + continue + + autodir = self.app_config.root_path / autodir + if not autodir.exists(): + continue + + for root, dirs, files in os.walk(autodir): + for d in dirs: + path = Path(root) / d + if path in known_paths or path.parent in scanned_dirs: + scanned_dirs.add(path) + continue + if any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]): + installed.update(installer.heuristic_install(path)) + scanned_dirs.add(path) + + for f in files: + path = Path(root) / f + if path in known_paths or path.parent in scanned_dirs: + continue + if path.suffix in 
{'.ckpt','.bin','.pth','.safetensors','.pt'}: + installed.update(installer.heuristic_install(path)) return installed def heuristic_import(self, diff --git a/invokeai/backend/model_management/model_probe.py b/invokeai/backend/model_management/model_probe.py index 42f4bb6225..2828cc7ab1 100644 --- a/invokeai/backend/model_management/model_probe.py +++ b/invokeai/backend/model_management/model_probe.py @@ -22,7 +22,7 @@ class ModelProbeInfo(object): variant_type: ModelVariantType prediction_type: SchedulerPredictionType upcast_attention: bool - format: Literal['diffusers','checkpoint'] + format: Literal['diffusers','checkpoint', 'lycoris'] image_size: int class ProbeBase(object): @@ -75,22 +75,23 @@ class ModelProbe(object): between V2-Base and V2-768 SD models. ''' if model_path: - format = 'diffusers' if model_path.is_dir() else 'checkpoint' + format_type = 'diffusers' if model_path.is_dir() else 'checkpoint' else: - format = 'diffusers' if isinstance(model,(ConfigMixin,ModelMixin)) else 'checkpoint' + format_type = 'diffusers' if isinstance(model,(ConfigMixin,ModelMixin)) else 'checkpoint' model_info = None try: model_type = cls.get_model_type_from_folder(model_path, model) \ - if format == 'diffusers' \ + if format_type == 'diffusers' \ else cls.get_model_type_from_checkpoint(model_path, model) - probe_class = cls.PROBES[format].get(model_type) + probe_class = cls.PROBES[format_type].get(model_type) if not probe_class: return None probe = probe_class(model_path, model, prediction_type_helper) base_type = probe.get_base_type() variant_type = probe.get_variant_type() prediction_type = probe.get_scheduler_prediction_type() + format = probe.get_format() model_info = ModelProbeInfo( model_type = model_type, base_type = base_type, @@ -116,10 +117,10 @@ class ModelProbe(object): if model_path.name == "learned_embeds.bin": return ModelType.TextualInversion - checkpoint = checkpoint or read_checkpoint_meta(model_path, scan=True) - checkpoint = checkpoint.get("state_dict", checkpoint) + ckpt = checkpoint if checkpoint else read_checkpoint_meta(model_path, scan=True) + ckpt = ckpt.get("state_dict", ckpt) - for key in checkpoint.keys(): + for key in ckpt.keys(): if any(key.startswith(v) for v in {"cond_stage_model.", "first_stage_model.", "model.diffusion_model."}): return ModelType.Main elif any(key.startswith(v) for v in {"encoder.conv_in", "decoder.conv_in"}): @@ -133,7 +134,7 @@ class ModelProbe(object): else: # diffusers-ti - if len(checkpoint) < 10 and all(isinstance(v, torch.Tensor) for v in checkpoint.values()): + if len(ckpt) < 10 and all(isinstance(v, torch.Tensor) for v in ckpt.values()): return ModelType.TextualInversion raise ValueError("Unable to determine model type") @@ -201,6 +202,9 @@ class ProbeBase(object): def get_scheduler_prediction_type(self)->SchedulerPredictionType: pass + def get_format(self)->str: + pass + class CheckpointProbeBase(ProbeBase): def __init__(self, checkpoint_path: Path, @@ -214,6 +218,9 @@ class CheckpointProbeBase(ProbeBase): def get_base_type(self)->BaseModelType: pass + def get_format(self)->str: + return 'checkpoint' + def get_variant_type(self)-> ModelVariantType: model_type = ModelProbe.get_model_type_from_checkpoint(self.checkpoint_path,self.checkpoint) if model_type != ModelType.Main: @@ -267,6 +274,9 @@ class VaeCheckpointProbe(CheckpointProbeBase): return BaseModelType.StableDiffusion1 class LoRACheckpointProbe(CheckpointProbeBase): + def get_format(self)->str: + return 'lycoris' + def get_base_type(self)->BaseModelType: checkpoint = 
self.checkpoint key1 = "lora_te_text_model_encoder_layers_0_mlp_fc1.lora_down.weight" @@ -286,6 +296,9 @@ class LoRACheckpointProbe(CheckpointProbeBase): return None class TextualInversionCheckpointProbe(CheckpointProbeBase): + def get_format(self)->str: + return None + def get_base_type(self)->BaseModelType: checkpoint = self.checkpoint if 'string_to_token' in checkpoint: @@ -332,6 +345,9 @@ class FolderProbeBase(ProbeBase): def get_variant_type(self)->ModelVariantType: return ModelVariantType.Normal + def get_format(self)->str: + return 'diffusers' + class PipelineFolderProbe(FolderProbeBase): def get_base_type(self)->BaseModelType: if self.model: @@ -387,6 +403,9 @@ class VaeFolderProbe(FolderProbeBase): return BaseModelType.StableDiffusion1 class TextualInversionFolderProbe(FolderProbeBase): + def get_format(self)->str: + return None + def get_base_type(self)->BaseModelType: path = self.folder_path / 'learned_embeds.bin' if not path.exists(): diff --git a/invokeai/backend/model_management/models/base.py b/invokeai/backend/model_management/models/base.py index 5a03f10212..afa62b2e4f 100644 --- a/invokeai/backend/model_management/models/base.py +++ b/invokeai/backend/model_management/models/base.py @@ -397,7 +397,7 @@ def read_checkpoint_meta(path: Union[str, Path], scan: bool = False): checkpoint = safetensors.torch.load_file(path, device="cpu") else: if scan: - scan_result = scan_file_path(checkpoint) + scan_result = scan_file_path(path) if scan_result.infected_files != 0: raise Exception(f"The model file \"{path}\" is potentially infected by malware. Aborting import.") checkpoint = torch.load(path, map_location=torch.device("meta")) diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index 980e9b6329..04dabca590 100644 --- a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -131,7 +131,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): window_width=window_width, exclude = self.starter_models ) - self.pipeline_models['autoload_pending'] = True + # self.pipeline_models['autoload_pending'] = True bottom_of_table = max(bottom_of_table,self.nextrely) self.nextrely = top_of_table @@ -316,31 +316,31 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): **kwargs, ) - label = "Directory to scan for models to automatically import ( autocompletes):" - self.nextrely += 1 - widgets.update( - autoload_directory = self.add_widget_intelligent( - FileBox, - max_height=3, - name=label, - value=str(config.root_path / config.autoimport_dir) if config.autoimport_dir else None, - select_dir=True, - must_exist=True, - use_two_lines=False, - labelColor="DANGER", - begin_entry_at=len(label)+1, - scroll_exit=True, - ) - ) - widgets.update( - autoscan_on_startup = self.add_widget_intelligent( - npyscreen.Checkbox, - name="Scan and import from this directory each time InvokeAI starts", - value=config.autoimport_dir is not None, - relx=4, - scroll_exit=True, - ) - ) + # label = "Directory to scan for models to automatically import ( autocompletes):" + # self.nextrely += 1 + # widgets.update( + # autoload_directory = self.add_widget_intelligent( + # FileBox, + # max_height=3, + # name=label, + # value=str(config.root_path / config.autoimport_dir) if config.autoimport_dir else None, + # select_dir=True, + # must_exist=True, + # use_two_lines=False, + # labelColor="DANGER", + # begin_entry_at=len(label)+1, + # scroll_exit=True, + # ) + # ) + # widgets.update( + # autoscan_on_startup = 
self.add_widget_intelligent( + # npyscreen.Checkbox, + # name="Scan and import from this directory each time InvokeAI starts", + # value=config.autoimport_dir is not None, + # relx=4, + # scroll_exit=True, + # ) + # ) return widgets def resize(self): @@ -501,8 +501,8 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): # rebuild the form, saving and restoring some of the fields that need to be preserved. saved_messages = self.monitor.entry_widget.values - autoload_dir = str(config.root_path / self.pipeline_models['autoload_directory'].value) - autoscan = self.pipeline_models['autoscan_on_startup'].value + # autoload_dir = str(config.root_path / self.pipeline_models['autoload_directory'].value) + # autoscan = self.pipeline_models['autoscan_on_startup'].value app.main_form = app.addForm( "MAIN", addModelsForm, name="Install Stable Diffusion Models", multipage=self.multipage, @@ -511,8 +511,8 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): app.main_form.monitor.entry_widget.values = saved_messages app.main_form.monitor.entry_widget.buffer([''],scroll_end=True) - app.main_form.pipeline_models['autoload_directory'].value = autoload_dir - app.main_form.pipeline_models['autoscan_on_startup'].value = autoscan + # app.main_form.pipeline_models['autoload_directory'].value = autoload_dir + # app.main_form.pipeline_models['autoscan_on_startup'].value = autoscan def marshall_arguments(self): """ @@ -546,17 +546,17 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): selections.install_models.extend(downloads.value.split()) # load directory and whether to scan on startup - if self.parentApp.autoload_pending: - selections.scan_directory = str(config.root_path / self.pipeline_models['autoload_directory'].value) - self.parentApp.autoload_pending = False - selections.autoscan_on_startup = self.pipeline_models['autoscan_on_startup'].value + # if self.parentApp.autoload_pending: + # selections.scan_directory = str(config.root_path / self.pipeline_models['autoload_directory'].value) + # self.parentApp.autoload_pending = False + # selections.autoscan_on_startup = self.pipeline_models['autoscan_on_startup'].value class AddModelApplication(npyscreen.NPSAppManaged): def __init__(self,opt): super().__init__() self.program_opts = opt self.user_cancelled = False - self.autoload_pending = True + # self.autoload_pending = True self.install_selections = InstallSelections() def onStart(self): From 73f63853ba6d5ff2a447d10ac40fe340a0fac893 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 12 Jun 2023 10:00:46 +1000 Subject: [PATCH 45/47] fix(nodes): use context for logger in param_easing --- invokeai/app/invocations/param_easing.py | 37 ++++++++++++------------ 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/invokeai/app/invocations/param_easing.py b/invokeai/app/invocations/param_easing.py index 1ff6261b88..e79763a35e 100644 --- a/invokeai/app/invocations/param_easing.py +++ b/invokeai/app/invocations/param_easing.py @@ -133,20 +133,19 @@ class StepParamEasingInvocation(BaseInvocation): postlist = list(num_poststeps * [self.post_end_value]) if log_diagnostics: - logger = InvokeAILogger.getLogger(name="StepParamEasing") - logger.debug("start_step: " + str(start_step)) - logger.debug("end_step: " + str(end_step)) - logger.debug("num_easing_steps: " + str(num_easing_steps)) - logger.debug("num_presteps: " + str(num_presteps)) - logger.debug("num_poststeps: " + str(num_poststeps)) - logger.debug("prelist size: " + 
str(len(prelist))) - logger.debug("postlist size: " + str(len(postlist))) - logger.debug("prelist: " + str(prelist)) - logger.debug("postlist: " + str(postlist)) + context.services.logger.debug("start_step: " + str(start_step)) + context.services.logger.debug("end_step: " + str(end_step)) + context.services.logger.debug("num_easing_steps: " + str(num_easing_steps)) + context.services.logger.debug("num_presteps: " + str(num_presteps)) + context.services.logger.debug("num_poststeps: " + str(num_poststeps)) + context.services.logger.debug("prelist size: " + str(len(prelist))) + context.services.logger.debug("postlist size: " + str(len(postlist))) + context.services.logger.debug("prelist: " + str(prelist)) + context.services.logger.debug("postlist: " + str(postlist)) easing_class = EASING_FUNCTIONS_MAP[self.easing] if log_diagnostics: - logger.debug("easing class: " + str(easing_class)) + context.services.logger.debug("easing class: " + str(easing_class)) easing_list = list() if self.mirror: # "expected" mirroring # if number of steps is even, squeeze duration down to (number_of_steps)/2 @@ -156,7 +155,7 @@ class StepParamEasingInvocation(BaseInvocation): # but if even then number_of_steps/2 === ceil(number_of_steps/2), so can just use ceil always base_easing_duration = int(np.ceil(num_easing_steps/2.0)) - if log_diagnostics: logger.debug("base easing duration: " + str(base_easing_duration)) + if log_diagnostics: context.services.logger.debug("base easing duration: " + str(base_easing_duration)) even_num_steps = (num_easing_steps % 2 == 0) # even number of steps easing_function = easing_class(start=self.start_value, end=self.end_value, @@ -166,14 +165,14 @@ class StepParamEasingInvocation(BaseInvocation): easing_val = easing_function.ease(step_index) base_easing_vals.append(easing_val) if log_diagnostics: - logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(easing_val)) + context.services.logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(easing_val)) if even_num_steps: mirror_easing_vals = list(reversed(base_easing_vals)) else: mirror_easing_vals = list(reversed(base_easing_vals[0:-1])) if log_diagnostics: - logger.debug("base easing vals: " + str(base_easing_vals)) - logger.debug("mirror easing vals: " + str(mirror_easing_vals)) + context.services.logger.debug("base easing vals: " + str(base_easing_vals)) + context.services.logger.debug("mirror easing vals: " + str(mirror_easing_vals)) easing_list = base_easing_vals + mirror_easing_vals # FIXME: add alt_mirror option (alternative to default or mirror), or remove entirely @@ -206,12 +205,12 @@ class StepParamEasingInvocation(BaseInvocation): step_val = easing_function.ease(step_index) easing_list.append(step_val) if log_diagnostics: - logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(step_val)) + context.services.logger.debug("step_index: " + str(step_index) + ", easing_val: " + str(step_val)) if log_diagnostics: - logger.debug("prelist size: " + str(len(prelist))) - logger.debug("easing_list size: " + str(len(easing_list))) - logger.debug("postlist size: " + str(len(postlist))) + context.services.logger.debug("prelist size: " + str(len(prelist))) + context.services.logger.debug("easing_list size: " + str(len(easing_list))) + context.services.logger.debug("postlist size: " + str(len(postlist))) param_list = prelist + easing_list + postlist From fc322aa9f7f4a65b9533c2ba18eb67e2d0da859a Mon Sep 17 00:00:00 2001 From: Kent Keirsey <31807370+hipsterusername@users.noreply.github.com> 
Date: Tue, 27 Jun 2023 23:45:47 -0400 Subject: [PATCH 46/47] Update controlnet-aux to 0.0.6 and add LeReS --- .../controlnet_image_processors.py | 44 +++++++++---------- pyproject.toml | 2 +- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index 01deebc9fa..8cfe35598d 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -31,7 +31,7 @@ from controlnet_aux import ( ZoeDetector, MediapipeFaceDetector, SamDetector, - # LeresDetector, + LeresDetector, ) from controlnet_aux.util import HWC3, ade_palette @@ -470,27 +470,27 @@ class MediapipeFaceProcessorInvocation(ImageProcessorInvocation, PILInvocationCo processed_image = mediapipe_face_processor(image, max_faces=self.max_faces, min_confidence=self.min_confidence) return processed_image -# class LeresImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig): -# """Applies leres processing to image""" -# # fmt: off -# type: Literal["leres_image_processor"] = "leres_image_processor" -# # Inputs -# thr_a: float = Field(default=0, description="Leres parameter `thr_a`") -# thr_b: float = Field(default=0, description="Leres parameter `thr_b`") -# boost: bool = Field(default=False, description="Whether to use boost mode") -# detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection") -# image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image") -# # fmt: on -# -# def run_processor(self, image): -# leres_processor = LeresDetector.from_pretrained("lllyasviel/Annotators") -# processed_image = leres_processor(image, -# thr_a=self.thr_a, -# thr_b=self.thr_b, -# boost=self.boost, -# detect_resolution=self.detect_resolution, -# image_resolution=self.image_resolution) -# return processed_image +class LeresImageProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig): + """Applies leres processing to image""" + # fmt: off + type: Literal["leres_image_processor"] = "leres_image_processor" + # Inputs + thr_a: float = Field(default=0, description="Leres parameter `thr_a`") + thr_b: float = Field(default=0, description="Leres parameter `thr_b`") + boost: bool = Field(default=False, description="Whether to use boost mode") + detect_resolution: int = Field(default=512, ge=0, description="The pixel resolution for detection") + image_resolution: int = Field(default=512, ge=0, description="The pixel resolution for the output image") + # fmt: on + + def run_processor(self, image): + leres_processor = LeresDetector.from_pretrained("lllyasviel/Annotators") + processed_image = leres_processor(image, + thr_a=self.thr_a, + thr_b=self.thr_b, + boost=self.boost, + detect_resolution=self.detect_resolution, + image_resolution=self.image_resolution) + return processed_image class TileResamplerProcessorInvocation(ImageProcessorInvocation, PILInvocationConfig): diff --git a/pyproject.toml b/pyproject.toml index d470b76937..6e5b8f4e22 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ dependencies = [ "click", "clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip", "compel>=1.2.1", - "controlnet-aux>=0.0.5", + "controlnet-aux>=0.0.6", "timm==0.6.13", # needed to override timm latest in controlnet_aux, see https://github.com/isl-org/ZoeDepth/issues/26 "datasets", 
"diffusers[torch]~=0.17.1", From 79fc708580a4efa1585e6ab5e55015957bfd9927 Mon Sep 17 00:00:00 2001 From: Lincoln Stein Date: Wed, 28 Jun 2023 15:26:42 -0400 Subject: [PATCH 47/47] warn but do not crash when model scan finds random cruft in `models` directory --- .../backend/install/invokeai_configure.py | 14 --------- .../backend/install/model_install_backend.py | 4 +-- .../backend/model_management/model_manager.py | 29 ++++++++++++++----- .../models/stable_diffusion.py | 2 +- invokeai/frontend/install/model_install.py | 25 ---------------- 5 files changed, 23 insertions(+), 51 deletions(-) diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py index 5b713516be..a0104bef25 100755 --- a/invokeai/backend/install/invokeai_configure.py +++ b/invokeai/backend/install/invokeai_configure.py @@ -7,8 +7,6 @@ # Coauthor: Kevin Turner http://github.com/keturn # import sys -print("Loading Python libraries...\n",file=sys.stderr) - import argparse import io import os @@ -706,18 +704,6 @@ def write_opts(opts: Namespace, init_file: Path): def default_output_dir() -> Path: return config.root_path / "outputs" -# # ------------------------------------- -# def default_embedding_dir() -> Path: -# return config.root_path / "embeddings" - -# # ------------------------------------- -# def default_lora_dir() -> Path: -# return config.root_path / "loras" - -# # ------------------------------------- -# def default_controlnet_dir() -> Path: -# return config.root_path / "controlnets" - # ------------------------------------- def write_default_options(program_opts: Namespace, initfile: Path): opt = default_startup_options(initfile) diff --git a/invokeai/backend/install/model_install_backend.py b/invokeai/backend/install/model_install_backend.py index f6cde2c90f..1c2f4d2fc1 100644 --- a/invokeai/backend/install/model_install_backend.py +++ b/invokeai/backend/install/model_install_backend.py @@ -155,8 +155,6 @@ class ModelInstall(object): def install(self, selections: InstallSelections): job = 1 jobs = len(selections.remove_models) + len(selections.install_models) -# if selections.scan_directory: -# jobs += 1 # remove requested models for key in selections.remove_models: @@ -218,7 +216,7 @@ class ModelInstall(object): # the model from being probed twice in the event that it has already been probed. 
def _install_path(self, path: Path, info: ModelProbeInfo=None)->Path: try: - logger.info(f'Probing {path}') + # logger.debug(f'Probing {path}') info = info or ModelProbe().heuristic_probe(path,self.prediction_helper) model_name = path.stem if info.format=='checkpoint' else path.name if self.mgr.model_exists(model_name, info.base_type, info.model_type): diff --git a/invokeai/backend/model_management/model_manager.py b/invokeai/backend/model_management/model_manager.py index 292b706176..66206ac165 100644 --- a/invokeai/backend/model_management/model_manager.py +++ b/invokeai/backend/model_management/model_manager.py @@ -714,9 +714,12 @@ class ModelManager(object): if model_path.is_relative_to(self.app_config.root_path): model_path = model_path.relative_to(self.app_config.root_path) - model_config: ModelConfigBase = model_class.probe_config(str(model_path)) - self.models[model_key] = model_config - new_models_found = True + try: + model_config: ModelConfigBase = model_class.probe_config(str(model_path)) + self.models[model_key] = model_config + new_models_found = True + except NotImplementedError as e: + self.logger.warning(e) imported_models = self.autoimport() @@ -737,10 +740,10 @@ class ModelManager(object): ) installed = set() - + scanned_dirs = set() + config = self.app_config known_paths = {(self.app_config.root_path / x['path']) for x in self.list_models()} - scanned_dirs = set() for autodir in [config.autoimport_dir, config.lora_dir, @@ -748,19 +751,25 @@ class ModelManager(object): config.controlnet_dir]: if autodir is None: continue + + self.logger.info(f'Scanning {autodir} for models to import') autodir = self.app_config.root_path / autodir if not autodir.exists(): continue - + + items_scanned = 0 + new_models_found = set() + for root, dirs, files in os.walk(autodir): + items_scanned += len(dirs) + len(files) for d in dirs: path = Path(root) / d if path in known_paths or path.parent in scanned_dirs: scanned_dirs.add(path) continue if any([(path/x).exists() for x in {'config.json','model_index.json','learned_embeds.bin'}]): - installed.update(installer.heuristic_install(path)) + new_models_found.update(installer.heuristic_install(path)) scanned_dirs.add(path) for f in files: @@ -768,7 +777,11 @@ class ModelManager(object): if path in known_paths or path.parent in scanned_dirs: continue if path.suffix in {'.ckpt','.bin','.pth','.safetensors','.pt'}: - installed.update(installer.heuristic_install(path)) + new_models_found.update(installer.heuristic_install(path)) + + self.logger.info(f'Scanned {items_scanned} files and directories, imported {len(new_models_found)} models') + installed.update(new_models_found) + return installed def heuristic_import(self, diff --git a/invokeai/backend/model_management/models/stable_diffusion.py b/invokeai/backend/model_management/models/stable_diffusion.py index ee95e3a849..a5d43c98a2 100644 --- a/invokeai/backend/model_management/models/stable_diffusion.py +++ b/invokeai/backend/model_management/models/stable_diffusion.py @@ -69,7 +69,7 @@ class StableDiffusion1Model(DiffusersModel): in_channels = unet_config['in_channels'] else: - raise Exception("Not supported stable diffusion diffusers format(possibly onnx?)") + raise NotImplementedError(f"{path} is not a supported stable diffusion diffusers format") else: raise NotImplementedError(f"Unknown stable diffusion 1.* format: {model_format}") diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py index 04dabca590..33ef114912 100644 --- 
a/invokeai/frontend/install/model_install.py +++ b/invokeai/frontend/install/model_install.py @@ -316,31 +316,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage): **kwargs, ) - # label = "Directory to scan for models to automatically import ( autocompletes):" - # self.nextrely += 1 - # widgets.update( - # autoload_directory = self.add_widget_intelligent( - # FileBox, - # max_height=3, - # name=label, - # value=str(config.root_path / config.autoimport_dir) if config.autoimport_dir else None, - # select_dir=True, - # must_exist=True, - # use_two_lines=False, - # labelColor="DANGER", - # begin_entry_at=len(label)+1, - # scroll_exit=True, - # ) - # ) - # widgets.update( - # autoscan_on_startup = self.add_widget_intelligent( - # npyscreen.Checkbox, - # name="Scan and import from this directory each time InvokeAI starts", - # value=config.autoimport_dir is not None, - # relx=4, - # scroll_exit=True, - # ) - # ) return widgets def resize(self):