fix invokeai_configure script to work with new mm; rename CLIs
Commit d959276217 (parent dfcf38be91)
@@ -185,7 +185,9 @@ from .config_base import InvokeAISettings
 INIT_FILE = Path("invokeai.yaml")
 DB_FILE = Path("invokeai.db")
 LEGACY_INIT_FILE = Path("invokeai.init")
-DEFAULT_MAX_VRAM = 0.5
+DEFAULT_RAM_CACHE = 10.0
+DEFAULT_VRAM_CACHE = 0.25
+DEFAULT_CONVERT_CACHE = 20.0
 
 
 class Categories(object):
@@ -261,9 +263,9 @@ class InvokeAIAppConfig(InvokeAISettings):
     version          : bool = Field(default=False, description="Show InvokeAI version and exit", json_schema_extra=Categories.Other)
 
     # CACHE
-    ram              : float = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", json_schema_extra=Categories.ModelCache, )
-    vram             : float = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", json_schema_extra=Categories.ModelCache, )
-    convert_cache    : float = Field(default=10.0, ge=0, description="Maximum size of on-disk converted models cache (GB)", json_schema_extra=Categories.ModelCache)
+    ram              : float = Field(default=DEFAULT_RAM_CACHE, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number, GB)", json_schema_extra=Categories.ModelCache, )
+    vram             : float = Field(default=DEFAULT_VRAM_CACHE, ge=0, description="Amount of VRAM reserved for model storage (floating point number, GB)", json_schema_extra=Categories.ModelCache, )
+    convert_cache    : float = Field(default=DEFAULT_CONVERT_CACHE, ge=0, description="Maximum size of on-disk converted models cache (GB)", json_schema_extra=Categories.ModelCache)
 
     lazy_offload     : bool = Field(default=True, description="Keep models in VRAM until their space is needed", json_schema_extra=Categories.ModelCache, )
     log_memory_usage : bool = Field(default=False, description="If True, a memory snapshot will be captured before and after every model cache operation, and the result will be logged (at debug level). There is a time cost to capturing the memory snapshots, so it is recommended to only enable this feature if you are actively inspecting the model cache's behaviour.", json_schema_extra=Categories.ModelCache)
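The three cache limits now come from the named constants introduced above instead of inline literals, so the configure TUI and the config schema share one source of truth. A minimal sketch of the pattern (a reduced model for illustration, not the full InvokeAIAppConfig):

    from pydantic import BaseModel, Field

    DEFAULT_RAM_CACHE = 10.0   # GB of CPU RAM for the model cache
    DEFAULT_VRAM_CACHE = 0.25  # GB of VRAM reserved for model storage

    class CacheSettings(BaseModel):
        # pointing Field defaults at module constants keeps schema and UI in sync
        ram: float = Field(default=DEFAULT_RAM_CACHE, gt=0)
        vram: float = Field(default=DEFAULT_VRAM_CACHE, ge=0)

    print(CacheSettings())  # ram=10.0 vram=0.25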
@@ -37,7 +37,7 @@ from invokeai.backend.model_manager.metadata import UnknownMetadataException
 from invokeai.backend.util.logging import InvokeAILogger
 
 # name of the starter models file
-INITIAL_MODELS = "INITIAL_MODELS2.yaml"
+INITIAL_MODELS = "INITIAL_MODELS.yaml"
 
 
 def initialize_record_store(app_config: InvokeAIAppConfig) -> ModelRecordServiceBase:
@@ -18,31 +18,30 @@ from argparse import Namespace
 from enum import Enum
 from pathlib import Path
 from shutil import get_terminal_size
-from typing import Any, get_args, get_type_hints
+from typing import Any, Optional, Set, Tuple, Type, get_args, get_type_hints
 from urllib import request
 
 import npyscreen
-import omegaconf
 import psutil
 import torch
 import transformers
-import yaml
-from diffusers import AutoencoderKL
+from diffusers import AutoencoderKL, ModelMixin
 from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 from huggingface_hub import HfFolder
 from huggingface_hub import login as hf_hub_login
-from omegaconf import OmegaConf
-from pydantic import ValidationError
+from omegaconf import DictConfig, OmegaConf
+from pydantic.error_wrappers import ValidationError
 from tqdm import tqdm
 from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 
 import invokeai.configs as configs
 from invokeai.app.services.config import InvokeAIAppConfig
+from invokeai.backend.install.install_helper import InstallHelper, InstallSelections
 from invokeai.backend.install.legacy_arg_parsing import legacy_parser
-from invokeai.backend.install.model_install_backend import InstallSelections, ModelInstall, hf_download_from_pretrained
-from invokeai.backend.model_management.model_probe import BaseModelType, ModelType
+from invokeai.backend.model_manager import BaseModelType, ModelType
+from invokeai.backend.util import choose_precision, choose_torch_device
 from invokeai.backend.util.logging import InvokeAILogger
-from invokeai.frontend.install.model_install import addModelsForm, process_and_execute
+from invokeai.frontend.install.model_install import addModelsForm
 
 # TO DO - Move all the frontend code into invokeai.frontend.install
 from invokeai.frontend.install.widgets import (
@@ -61,7 +60,7 @@ warnings.filterwarnings("ignore")
 transformers.logging.set_verbosity_error()
 
 
-def get_literal_fields(field) -> list[Any]:
+def get_literal_fields(field: str) -> Tuple[Any]:
     return get_args(get_type_hints(InvokeAIAppConfig).get(field))
 
 
@@ -80,8 +79,7 @@ ATTENTION_SLICE_CHOICES = get_literal_fields("attention_slice_size")
 GENERATION_OPT_CHOICES = ["sequential_guidance", "force_tiled_decode", "lazy_offload"]
 GB = 1073741824  # GB in bytes
 HAS_CUDA = torch.cuda.is_available()
-_, MAX_VRAM = torch.cuda.mem_get_info() if HAS_CUDA else (0, 0)
-
+_, MAX_VRAM = torch.cuda.mem_get_info() if HAS_CUDA else (0.0, 0.0)
 
 MAX_VRAM /= GB
 MAX_RAM = psutil.virtual_memory().total / GB
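torch.cuda.mem_get_info() returns a (free_bytes, total_bytes) tuple, and the change makes the CPU-only fallback use floats so MAX_VRAM has a consistent type on both paths. The shape of the computation in isolation:

    import torch

    GB = 1073741824  # 2**30 bytes
    if torch.cuda.is_available():
        _, total = torch.cuda.mem_get_info()  # free and total device memory, in bytes
    else:
        _, total = (0.0, 0.0)  # float fallback keeps the division below float-typed
    max_vram_gb = total / GB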
@@ -96,13 +94,15 @@ logger = InvokeAILogger.get_logger()
 
 
 class DummyWidgetValue(Enum):
+    """Dummy widget values."""
+
     zero = 0
     true = True
     false = False
 
 
 # --------------------------------------------
-def postscript(errors: None):
+def postscript(errors: Set[str]) -> None:
     if not any(errors):
         message = f"""
 ** INVOKEAI INSTALLATION SUCCESSFUL **
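DummyWidgetValue exists because npyscreen widgets are read through their .value attribute; an Enum member exposes .value too, so a dummy can stand in for a widget that was never created (the VRAM slider on CPU-only systems, for example). A small illustration:

    from enum import Enum

    class DummyWidgetValue(Enum):
        zero = 0
        true = True
        false = False

    # marshall_arguments() does getattr(self, attr).value for every field,
    # and the dummy answers just like a real widget would:
    print(DummyWidgetValue.zero.value)  # 0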
@@ -143,7 +143,7 @@ def yes_or_no(prompt: str, default_yes=True):
 
 
 # ---------------------------------------------
-def HfLogin(access_token) -> str:
+def HfLogin(access_token) -> None:
     """
     Helper for logging in to Huggingface
     The stdout capture is needed to hide the irrelevant "git credential helper" warning
@@ -162,7 +162,7 @@ def HfLogin(access_token) -> str:
 
 # -------------------------------------
 class ProgressBar:
-    def __init__(self, model_name="file"):
+    def __init__(self, model_name: str = "file"):
         self.pbar = None
         self.name = model_name
 
@@ -179,6 +179,22 @@ class ProgressBar:
             self.pbar.update(block_size)
 
 
+# ---------------------------------------------
+def hf_download_from_pretrained(model_class: Type[ModelMixin], model_name: str, destination: Path, **kwargs: Any):
+    filter = lambda x: "fp16 is not a valid" not in x.getMessage()  # noqa E731
+    logger.addFilter(filter)
+    try:
+        model = model_class.from_pretrained(
+            model_name,
+            resume_download=True,
+            **kwargs,
+        )
+        model.save_pretrained(destination, safe_serialization=True)
+    finally:
+        logger.removeFilter(filter)
+    return destination
+
+
 # ---------------------------------------------
 def download_with_progress_bar(model_url: str, model_dest: str, label: str = "the"):
     try:
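The function moved here wraps the download in a temporary logging filter: logging.Logger.addFilter accepts any callable taking a LogRecord, and the try/finally guarantees the filter is removed even if the download fails. The pattern in isolation (logger name and messages are illustrative):

    import logging

    logging.basicConfig()
    logger = logging.getLogger("example")

    def download_quietly():
        flt = lambda record: "fp16 is not a valid" not in record.getMessage()
        logger.addFilter(flt)  # drop matching records for the duration of the call
        try:
            logger.warning("fp16 is not a valid torch_dtype")  # suppressed
            logger.warning("something else went wrong")        # still shown
        finally:
            logger.removeFilter(flt)  # restore normal logging no matter what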
@@ -249,6 +265,7 @@ def download_conversion_models():
 
 
 # ---------------------------------------------
+# TO DO: use the download queue here.
 def download_realesrgan():
     logger.info("Installing ESRGAN Upscaling models...")
     URLs = [
@@ -288,18 +305,19 @@ def download_lama():
 
 
 # ---------------------------------------------
-def download_support_models():
+def download_support_models() -> None:
     download_realesrgan()
     download_lama()
     download_conversion_models()
 
 
 # -------------------------------------
-def get_root(root: str = None) -> str:
+def get_root(root: Optional[str] = None) -> str:
     if root:
         return root
-    elif os.environ.get("INVOKEAI_ROOT"):
-        return os.environ.get("INVOKEAI_ROOT")
+    elif root := os.environ.get("INVOKEAI_ROOT"):
+        assert root is not None
+        return root
     else:
         return str(config.root_path)
 
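The rewritten branch uses the walrus operator so the environment variable is read only once, and the assert narrows Optional[str] to str for the type checker (the truthiness test already guarantees it at runtime). Same shape in isolation:

    import os
    from typing import Optional

    def get_root(root: Optional[str] = None) -> str:
        if root:
            return root
        elif root := os.environ.get("INVOKEAI_ROOT"):
            assert root is not None  # redundant at runtime, but satisfies strict type checkers
            return root
        else:
            return "/opt/invokeai"  # illustrative fallback; the real code returns config.root_path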
@@ -455,6 +473,25 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
             max_width=110,
             scroll_exit=True,
         )
+        self.add_widget_intelligent(
+            npyscreen.TitleFixedText,
+            name="Model disk conversion cache size (GB). This is used to cache safetensors files that need to be converted to diffusers..",
+            begin_entry_at=0,
+            editable=False,
+            color="CONTROL",
+            scroll_exit=True,
+        )
+        self.nextrely -= 1
+        self.disk = self.add_widget_intelligent(
+            npyscreen.Slider,
+            value=clip(old_opts.convert_cache, range=(0, 100), step=0.5),
+            out_of=100,
+            lowest=0.0,
+            step=0.5,
+            relx=8,
+            scroll_exit=True,
+        )
+        self.nextrely += 1
         self.add_widget_intelligent(
             npyscreen.TitleFixedText,
             name="Model RAM cache size (GB). Make this at least large enough to hold a single full model (2GB for SD-1, 6GB for SDXL).",
@@ -495,6 +532,14 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
             )
         else:
             self.vram = DummyWidgetValue.zero
+
+        self.nextrely += 1
+        self.add_widget_intelligent(
+            npyscreen.FixedText,
+            value="Location of the database used to store model path and configuration information:",
+            editable=False,
+            color="CONTROL",
+        )
         self.nextrely += 1
         self.outdir = self.add_widget_intelligent(
             FileBox,
@@ -506,19 +551,21 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
             labelColor="GOOD",
             begin_entry_at=40,
             max_height=3,
+            max_width=127,
             scroll_exit=True,
         )
         self.autoimport_dirs = {}
         self.autoimport_dirs["autoimport_dir"] = self.add_widget_intelligent(
             FileBox,
-            name="Folder to recursively scan for new checkpoints, ControlNets, LoRAs and TI models",
-            value=str(config.root_path / config.autoimport_dir),
+            name="Optional folder to scan for new checkpoints, ControlNets, LoRAs and TI models",
+            value=str(config.root_path / config.autoimport_dir) if config.autoimport_dir else "",
             select_dir=True,
             must_exist=False,
             use_two_lines=False,
             labelColor="GOOD",
             begin_entry_at=32,
             max_height=3,
+            max_width=127,
             scroll_exit=True,
         )
         self.nextrely += 1
@@ -555,6 +602,10 @@ https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENS
         self.attention_slice_label.hidden = not show
         self.attention_slice_size.hidden = not show
 
+    def show_hide_model_conf_override(self, value):
+        self.model_conf_override.hidden = value
+        self.model_conf_override.display()
+
     def on_ok(self):
         options = self.marshall_arguments()
         if self.validate_field_values(options):
@@ -584,18 +635,21 @@ https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENS
         else:
             return True
 
-    def marshall_arguments(self):
+    def marshall_arguments(self) -> Namespace:
         new_opts = Namespace()
 
         for attr in [
            "ram",
            "vram",
+           "convert_cache",
            "outdir",
         ]:
            if hasattr(self, attr):
                setattr(new_opts, attr, getattr(self, attr).value)
 
        for attr in self.autoimport_dirs:
+           if not self.autoimport_dirs[attr].value:
+               continue
            directory = Path(self.autoimport_dirs[attr].value)
            if directory.is_relative_to(config.root_path):
                directory = directory.relative_to(config.root_path)
@@ -615,13 +669,14 @@ https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENS
 
 
 class EditOptApplication(npyscreen.NPSAppManaged):
-    def __init__(self, program_opts: Namespace, invokeai_opts: Namespace):
+    def __init__(self, program_opts: Namespace, invokeai_opts: InvokeAIAppConfig, install_helper: InstallHelper):
         super().__init__()
         self.program_opts = program_opts
         self.invokeai_opts = invokeai_opts
         self.user_cancelled = False
         self.autoload_pending = True
-        self.install_selections = default_user_selections(program_opts)
+        self.install_helper = install_helper
+        self.install_selections = default_user_selections(program_opts, install_helper)
 
     def onStart(self):
         npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
@@ -640,16 +695,10 @@ class EditOptApplication(npyscreen.NPSAppManaged):
             cycle_widgets=False,
         )
 
-    def new_opts(self):
+    def new_opts(self) -> Namespace:
         return self.options.marshall_arguments()
 
 
-def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Namespace:
-    editApp = EditOptApplication(program_opts, invokeai_opts)
-    editApp.run()
-    return editApp.new_opts()
-
-
 def default_ramcache() -> float:
     """Run a heuristic for the default RAM cache based on installed RAM."""
 
@@ -660,27 +709,18 @@ def default_ramcache() -> float:
     )  # 2.1 is just large enough for sd 1.5 ;-)
 
 
-def default_startup_options(init_file: Path) -> Namespace:
+def default_startup_options(init_file: Path) -> InvokeAIAppConfig:
     opts = InvokeAIAppConfig.get_config()
-    opts.ram = opts.ram or default_ramcache()
+    opts.ram = default_ramcache()
     return opts
 
 
-def default_user_selections(program_opts: Namespace) -> InstallSelections:
-    try:
-        installer = ModelInstall(config)
-    except omegaconf.errors.ConfigKeyError:
-        logger.warning("Your models.yaml file is corrupt or out of date. Reinitializing")
-        initialize_rootdir(config.root_path, True)
-        installer = ModelInstall(config)
-
-    models = installer.all_models()
+def default_user_selections(program_opts: Namespace, install_helper: InstallHelper) -> InstallSelections:
+    default_model = install_helper.default_model()
+    assert default_model is not None
+    default_models = [default_model] if program_opts.default_only else install_helper.recommended_models()
     return InstallSelections(
-        install_models=[models[installer.default_model()].path or models[installer.default_model()].repo_id]
-        if program_opts.default_only
-        else [models[x].path or models[x].repo_id for x in installer.recommended_models()]
-        if program_opts.yes_to_all
-        else [],
+        install_models=default_models if program_opts.yes_to_all else [],
     )
 
 
@@ -716,21 +756,10 @@ def initialize_rootdir(root: Path, yes_to_all: bool = False):
         path.mkdir(parents=True, exist_ok=True)
 
 
-def maybe_create_models_yaml(root: Path):
-    models_yaml = root / "configs" / "models.yaml"
-    if models_yaml.exists():
-        if OmegaConf.load(models_yaml).get("__metadata__"):  # up to date
-            return
-        else:
-            logger.info("Creating new models.yaml, original saved as models.yaml.orig")
-            models_yaml.rename(models_yaml.parent / "models.yaml.orig")
-
-    with open(models_yaml, "w") as yaml_file:
-        yaml_file.write(yaml.dump({"__metadata__": {"version": "3.0.0"}}))
-
-
 # -------------------------------------
-def run_console_ui(program_opts: Namespace, initfile: Path = None) -> (Namespace, Namespace):
+def run_console_ui(
+    program_opts: Namespace, initfile: Path, install_helper: InstallHelper
+) -> Tuple[Optional[Namespace], Optional[InstallSelections]]:
     invokeai_opts = default_startup_options(initfile)
     invokeai_opts.root = program_opts.root
 
@@ -739,22 +768,16 @@ def run_console_ui(program_opts: Namespace, initfile: Path = None) -> (Namespace
             "Could not increase terminal size. Try running again with a larger window or smaller font size."
         )
 
-    # the install-models application spawns a subprocess to install
-    # models, and will crash unless this is set before running.
-    import torch
-
-    torch.multiprocessing.set_start_method("spawn")
-
-    editApp = EditOptApplication(program_opts, invokeai_opts)
+    editApp = EditOptApplication(program_opts, invokeai_opts, install_helper)
     editApp.run()
     if editApp.user_cancelled:
         return (None, None)
     else:
-        return (editApp.new_opts, editApp.install_selections)
+        return (editApp.new_opts(), editApp.install_selections)
 
 
 # -------------------------------------
-def write_opts(opts: Namespace, init_file: Path):
+def write_opts(opts: InvokeAIAppConfig, init_file: Path) -> None:
     """
     Update the invokeai.yaml file with values from current settings.
     """
@@ -762,7 +785,7 @@ def write_opts(opts: Namespace, init_file: Path):
     new_config = InvokeAIAppConfig.get_config()
     new_config.root = config.root
 
-    for key, value in opts.__dict__.items():
+    for key, value in opts.model_dump().items():
         if hasattr(new_config, key):
             setattr(new_config, key, value)
 
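Reading opts.__dict__ worked while opts was an argparse Namespace; now that write_opts receives an InvokeAIAppConfig, model_dump() is the pydantic v2 way to get a plain dict of field values. A minimal illustration:

    from pydantic import BaseModel

    class Opts(BaseModel):
        ram: float = 10.0
        vram: float = 0.25

    opts = Opts(vram=0.5)
    for key, value in opts.model_dump().items():
        print(key, value)  # ram 10.0, then vram 0.5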
@@ -779,7 +802,7 @@ def default_output_dir() -> Path:
 
 
 # -------------------------------------
-def write_default_options(program_opts: Namespace, initfile: Path):
+def write_default_options(program_opts: Namespace, initfile: Path) -> None:
     opt = default_startup_options(initfile)
     write_opts(opt, initfile)
 
@@ -789,16 +812,11 @@ def write_default_options(program_opts: Namespace, initfile: Path):
 # the legacy Args object in order to parse
 # the old init file and write out the new
 # yaml format.
-def migrate_init_file(legacy_format: Path):
+def migrate_init_file(legacy_format: Path) -> None:
     old = legacy_parser.parse_args([f"@{str(legacy_format)}"])
     new = InvokeAIAppConfig.get_config()
 
-    fields = [
-        x
-        for x, y in InvokeAIAppConfig.model_fields.items()
-        if (y.json_schema_extra.get("category", None) if y.json_schema_extra else None) != "DEPRECATED"
-    ]
-    for attr in fields:
+    for attr in InvokeAIAppConfig.model_fields.keys():
         if hasattr(old, attr):
             try:
                 setattr(new, attr, getattr(old, attr))
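model_fields is the pydantic v2 class attribute mapping field names to FieldInfo objects; the migration now copies every field over and no longer skips the DEPRECATED category. The shape of the iteration, reduced:

    from pydantic import BaseModel, Field

    class Config(BaseModel):
        ram: float = Field(default=10.0, json_schema_extra={"category": "ModelCache"})
        old_flag: bool = Field(default=False, json_schema_extra={"category": "DEPRECATED"})

    for attr in Config.model_fields.keys():
        print(attr)  # ram and old_flag -- every declared field, deprecated or not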
@@ -819,7 +837,7 @@ def migrate_init_file(legacy_format: Path):
 
 
 # -------------------------------------
-def migrate_models(root: Path):
+def migrate_models(root: Path) -> None:
     from invokeai.backend.install.migrate_to_3 import do_migrate
 
     do_migrate(root, root)
@@ -838,7 +856,9 @@ def migrate_if_needed(opt: Namespace, root: Path) -> bool:
     ):
         logger.info("** Migrating invokeai.init to invokeai.yaml")
         migrate_init_file(old_init_file)
-        config.parse_args(argv=[], conf=OmegaConf.load(new_init_file))
+        omegaconf = OmegaConf.load(new_init_file)
+        assert isinstance(omegaconf, DictConfig)
+        config.parse_args(argv=[], conf=omegaconf)
 
         if old_hub.exists():
             migrate_models(config.root_path)
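OmegaConf.load is annotated as returning either a DictConfig or a ListConfig depending on the YAML root node, so the isinstance assert narrows the type before the value is handed to parse_args. In isolation:

    from omegaconf import DictConfig, OmegaConf

    conf = OmegaConf.create({"ram": 10.0})  # stands in for OmegaConf.load(path)
    assert isinstance(conf, DictConfig)     # a YAML document with a list root would be a ListConfig
    print(conf.ram)                         # 10.0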
@@ -849,7 +869,7 @@ def migrate_if_needed(opt: Namespace, root: Path) -> bool:
 
 
 # -------------------------------------
-def main() -> None:
+def main():
     parser = argparse.ArgumentParser(description="InvokeAI model downloader")
     parser.add_argument(
         "--skip-sd-weights",
@@ -908,6 +928,7 @@ def main() -> None:
     if opt.full_precision:
         invoke_args.extend(["--precision", "float32"])
     config.parse_args(invoke_args)
+    config.precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device()))
     logger = InvokeAILogger().get_logger(config=config)
 
     errors = set()
@@ -921,14 +942,18 @@ def main() -> None:
         # run this unconditionally in case new directories need to be added
         initialize_rootdir(config.root_path, opt.yes_to_all)
 
-        models_to_download = default_user_selections(opt)
+        # this will initialize the models.yaml file if not present
+        install_helper = InstallHelper(config, logger)
+
+        models_to_download = default_user_selections(opt, install_helper)
         new_init_file = config.root_path / "invokeai.yaml"
 
         if opt.yes_to_all:
             write_default_options(opt, new_init_file)
             init_options = Namespace(precision="float32" if opt.full_precision else "float16")
 
         else:
-            init_options, models_to_download = run_console_ui(opt, new_init_file)
+            init_options, models_to_download = run_console_ui(opt, new_init_file, install_helper)
             if init_options:
                 write_opts(init_options, new_init_file)
             else:
@@ -943,10 +968,12 @@ def main() -> None:
 
         if opt.skip_sd_weights:
             logger.warning("Skipping diffusion weights download per user request")
+
         elif models_to_download:
-            process_and_execute(opt, models_to_download)
+            install_helper.add_or_delete(models_to_download)
 
         postscript(errors=errors)
+
         if not opt.yes_to_all:
             input("Press any key to continue...")
     except WindowTooSmallException as e:
@@ -19,7 +19,7 @@ from invokeai.backend.model_manager import (
 )
 from invokeai.backend.model_manager.load.convert_cache import ModelConvertCacheBase
 from invokeai.backend.model_manager.load.load_base import LoadedModel, ModelLoaderBase
-from invokeai.backend.model_manager.load.model_cache.model_cache_base import CacheStats, ModelCacheBase, ModelLockerBase
+from invokeai.backend.model_manager.load.model_cache.model_cache_base import ModelCacheBase, ModelLockerBase
 from invokeai.backend.model_manager.load.model_util import calc_model_size_by_data, calc_model_size_by_fs
 from invokeai.backend.model_manager.load.optimizations import skip_torch_weight_init
 from invokeai.backend.util.devices import choose_torch_device, torch_dtype
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 from contextlib import nullcontext
-from typing import Optional, Union
+from typing import Literal, Optional, Union
 
 import torch
 from torch import autocast
@@ -31,7 +31,9 @@ def choose_torch_device() -> torch.device:
 
 # We are in transition here from using a single global AppConfig to allowing multiple
 # configurations. It is strongly recommended to pass the app_config to this function.
-def choose_precision(device: torch.device, app_config: Optional[InvokeAIAppConfig] = None) -> str:
+def choose_precision(
+    device: torch.device, app_config: Optional[InvokeAIAppConfig] = None
+) -> Literal["float32", "float16", "bfloat16"]:
     """Return an appropriate precision for the given torch device."""
     app_config = app_config or config
     if device.type == "cuda":
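Narrowing the return annotation from str to a Literal lets type checkers verify that callers handle exactly the three possible precision strings. A reduced sketch of the idea (the body is illustrative, not the real device logic):

    from typing import Literal

    def choose_precision(device_type: str) -> Literal["float32", "float16", "bfloat16"]:
        if device_type in ("cuda", "mps"):
            return "float16"
        return "float32"

    precision: Literal["float32", "float16", "bfloat16"] = choose_precision("cuda")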
@@ -1,153 +1,157 @@
 # This file predefines a few models that the user may want to install.
 sd-1/main/stable-diffusion-v1-5:
   description: Stable Diffusion version 1.5 diffusers model (4.27 GB)
-  repo_id: runwayml/stable-diffusion-v1-5
+  source: runwayml/stable-diffusion-v1-5
   recommended: True
   default: True
 sd-1/main/stable-diffusion-v1-5-inpainting:
   description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)
-  repo_id: runwayml/stable-diffusion-inpainting
+  source: runwayml/stable-diffusion-inpainting
   recommended: True
 sd-2/main/stable-diffusion-2-1:
   description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)
-  repo_id: stabilityai/stable-diffusion-2-1
+  source: stabilityai/stable-diffusion-2-1
   recommended: False
 sd-2/main/stable-diffusion-2-inpainting:
   description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
-  repo_id: stabilityai/stable-diffusion-2-inpainting
+  source: stabilityai/stable-diffusion-2-inpainting
   recommended: False
 sdxl/main/stable-diffusion-xl-base-1-0:
   description: Stable Diffusion XL base model (12 GB)
-  repo_id: stabilityai/stable-diffusion-xl-base-1.0
+  source: stabilityai/stable-diffusion-xl-base-1.0
   recommended: True
 sdxl-refiner/main/stable-diffusion-xl-refiner-1-0:
   description: Stable Diffusion XL refiner model (12 GB)
-  repo_id: stabilityai/stable-diffusion-xl-refiner-1.0
+  source: stabilityai/stable-diffusion-xl-refiner-1.0
   recommended: False
-sdxl/vae/sdxl-1-0-vae-fix:
-  description: Fine tuned version of the SDXL-1.0 VAE
-  repo_id: madebyollin/sdxl-vae-fp16-fix
+sdxl/vae/sdxl-vae-fp16-fix:
+  description: Version of the SDXL-1.0 VAE that works in half precision mode
+  source: madebyollin/sdxl-vae-fp16-fix
   recommended: True
 sd-1/main/Analog-Diffusion:
   description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)
-  repo_id: wavymulder/Analog-Diffusion
+  source: wavymulder/Analog-Diffusion
   recommended: False
-sd-1/main/Deliberate_v5:
+sd-1/main/Deliberate:
   description: Versatile model that produces detailed images up to 768px (4.27 GB)
-  path: https://huggingface.co/XpucT/Deliberate/resolve/main/Deliberate_v5.safetensors
+  source: XpucT/Deliberate
   recommended: False
 sd-1/main/Dungeons-and-Diffusion:
   description: Dungeons & Dragons characters (2.13 GB)
-  repo_id: 0xJustin/Dungeons-and-Diffusion
+  source: 0xJustin/Dungeons-and-Diffusion
   recommended: False
 sd-1/main/dreamlike-photoreal-2:
   description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)
-  repo_id: dreamlike-art/dreamlike-photoreal-2.0
+  source: dreamlike-art/dreamlike-photoreal-2.0
   recommended: False
 sd-1/main/Inkpunk-Diffusion:
   description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)
-  repo_id: Envvi/Inkpunk-Diffusion
+  source: Envvi/Inkpunk-Diffusion
   recommended: False
 sd-1/main/openjourney:
   description: An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)
-  repo_id: prompthero/openjourney
+  source: prompthero/openjourney
   recommended: False
 sd-1/main/seek.art_MEGA:
-  repo_id: coreco/seek.art_MEGA
+  source: coreco/seek.art_MEGA
   description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)
   recommended: False
 sd-1/main/trinart_stable_diffusion_v2:
   description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)
-  repo_id: naclbit/trinart_stable_diffusion_v2
+  source: naclbit/trinart_stable_diffusion_v2
   recommended: False
 sd-1/controlnet/qrcode_monster:
-  repo_id: monster-labs/control_v1p_sd15_qrcode_monster
+  source: monster-labs/control_v1p_sd15_qrcode_monster
   subfolder: v2
 sd-1/controlnet/canny:
-  repo_id: lllyasviel/control_v11p_sd15_canny
+  source: lllyasviel/control_v11p_sd15_canny
   recommended: True
 sd-1/controlnet/inpaint:
-  repo_id: lllyasviel/control_v11p_sd15_inpaint
+  source: lllyasviel/control_v11p_sd15_inpaint
 sd-1/controlnet/mlsd:
-  repo_id: lllyasviel/control_v11p_sd15_mlsd
+  source: lllyasviel/control_v11p_sd15_mlsd
 sd-1/controlnet/depth:
-  repo_id: lllyasviel/control_v11f1p_sd15_depth
+  source: lllyasviel/control_v11f1p_sd15_depth
   recommended: True
 sd-1/controlnet/normal_bae:
-  repo_id: lllyasviel/control_v11p_sd15_normalbae
+  source: lllyasviel/control_v11p_sd15_normalbae
 sd-1/controlnet/seg:
-  repo_id: lllyasviel/control_v11p_sd15_seg
+  source: lllyasviel/control_v11p_sd15_seg
 sd-1/controlnet/lineart:
-  repo_id: lllyasviel/control_v11p_sd15_lineart
+  source: lllyasviel/control_v11p_sd15_lineart
   recommended: True
 sd-1/controlnet/lineart_anime:
-  repo_id: lllyasviel/control_v11p_sd15s2_lineart_anime
+  source: lllyasviel/control_v11p_sd15s2_lineart_anime
 sd-1/controlnet/openpose:
-  repo_id: lllyasviel/control_v11p_sd15_openpose
+  source: lllyasviel/control_v11p_sd15_openpose
   recommended: True
 sd-1/controlnet/scribble:
-  repo_id: lllyasviel/control_v11p_sd15_scribble
+  source: lllyasviel/control_v11p_sd15_scribble
   recommended: False
 sd-1/controlnet/softedge:
-  repo_id: lllyasviel/control_v11p_sd15_softedge
+  source: lllyasviel/control_v11p_sd15_softedge
 sd-1/controlnet/shuffle:
-  repo_id: lllyasviel/control_v11e_sd15_shuffle
+  source: lllyasviel/control_v11e_sd15_shuffle
 sd-1/controlnet/tile:
-  repo_id: lllyasviel/control_v11f1e_sd15_tile
+  source: lllyasviel/control_v11f1e_sd15_tile
 sd-1/controlnet/ip2p:
-  repo_id: lllyasviel/control_v11e_sd15_ip2p
+  source: lllyasviel/control_v11e_sd15_ip2p
 sd-1/t2i_adapter/canny-sd15:
-  repo_id: TencentARC/t2iadapter_canny_sd15v2
+  source: TencentARC/t2iadapter_canny_sd15v2
 sd-1/t2i_adapter/sketch-sd15:
-  repo_id: TencentARC/t2iadapter_sketch_sd15v2
+  source: TencentARC/t2iadapter_sketch_sd15v2
 sd-1/t2i_adapter/depth-sd15:
-  repo_id: TencentARC/t2iadapter_depth_sd15v2
+  source: TencentARC/t2iadapter_depth_sd15v2
 sd-1/t2i_adapter/zoedepth-sd15:
-  repo_id: TencentARC/t2iadapter_zoedepth_sd15v1
+  source: TencentARC/t2iadapter_zoedepth_sd15v1
 sdxl/t2i_adapter/canny-sdxl:
-  repo_id: TencentARC/t2i-adapter-canny-sdxl-1.0
+  source: TencentARC/t2i-adapter-canny-sdxl-1.0
 sdxl/t2i_adapter/zoedepth-sdxl:
-  repo_id: TencentARC/t2i-adapter-depth-zoe-sdxl-1.0
+  source: TencentARC/t2i-adapter-depth-zoe-sdxl-1.0
 sdxl/t2i_adapter/lineart-sdxl:
-  repo_id: TencentARC/t2i-adapter-lineart-sdxl-1.0
+  source: TencentARC/t2i-adapter-lineart-sdxl-1.0
 sdxl/t2i_adapter/sketch-sdxl:
-  repo_id: TencentARC/t2i-adapter-sketch-sdxl-1.0
+  source: TencentARC/t2i-adapter-sketch-sdxl-1.0
 sd-1/embedding/EasyNegative:
-  path: https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors
+  source: https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors
   recommended: True
-sd-1/embedding/ahx-beta-453407d:
-  repo_id: sd-concepts-library/ahx-beta-453407d
+  description: A textual inversion to use in the negative prompt to reduce bad anatomy
+sd-1/lora/FlatColor:
+  source: https://civitai.com/models/6433/loraflatcolor
+  recommended: True
+  description: A LoRA that generates scenery using solid blocks of color
 sd-1/lora/Ink scenery:
-  path: https://civitai.com/api/download/models/83390
+  source: https://civitai.com/api/download/models/83390
+  description: Generate india ink-like landscapes
 sd-1/ip_adapter/ip_adapter_sd15:
-  repo_id: InvokeAI/ip_adapter_sd15
+  source: InvokeAI/ip_adapter_sd15
   recommended: True
   requires:
     - InvokeAI/ip_adapter_sd_image_encoder
   description: IP-Adapter for SD 1.5 models
 sd-1/ip_adapter/ip_adapter_plus_sd15:
-  repo_id: InvokeAI/ip_adapter_plus_sd15
+  source: InvokeAI/ip_adapter_plus_sd15
   recommended: False
   requires:
     - InvokeAI/ip_adapter_sd_image_encoder
   description: Refined IP-Adapter for SD 1.5 models
 sd-1/ip_adapter/ip_adapter_plus_face_sd15:
-  repo_id: InvokeAI/ip_adapter_plus_face_sd15
+  source: InvokeAI/ip_adapter_plus_face_sd15
   recommended: False
   requires:
     - InvokeAI/ip_adapter_sd_image_encoder
   description: Refined IP-Adapter for SD 1.5 models, adapted for faces
 sdxl/ip_adapter/ip_adapter_sdxl:
-  repo_id: InvokeAI/ip_adapter_sdxl
+  source: InvokeAI/ip_adapter_sdxl
   recommended: False
   requires:
     - InvokeAI/ip_adapter_sdxl_image_encoder
   description: IP-Adapter for SDXL models
 any/clip_vision/ip_adapter_sd_image_encoder:
-  repo_id: InvokeAI/ip_adapter_sd_image_encoder
+  source: InvokeAI/ip_adapter_sd_image_encoder
   recommended: False
   description: Required model for using IP-Adapters with SD-1/2 models
 any/clip_vision/ip_adapter_sdxl_image_encoder:
-  repo_id: InvokeAI/ip_adapter_sdxl_image_encoder
+  source: InvokeAI/ip_adapter_sdxl_image_encoder
   recommended: False
   description: Required model for using IP-Adapters with SDXL models
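The file above standardizes on a single source: key where the old format split model locations across repo_id: (a HuggingFace repo) and path: (a URL or local file); the second file below keeps the legacy keys for the old installer. A hedged sketch of how a single source value could be classified (the helper and heuristic are illustrative, not the actual model-manager logic):

    from pathlib import Path

    def classify_source(source: str) -> str:
        # illustrative heuristic only; the real model manager has richer source types
        if source.startswith(("http://", "https://")):
            return "url"
        if Path(source).exists():
            return "local path"
        return "huggingface repo id"

    print(classify_source("runwayml/stable-diffusion-v1-5"))  # huggingface repo id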
@@ -1,157 +1,153 @@
 # This file predefines a few models that the user may want to install.
 sd-1/main/stable-diffusion-v1-5:
   description: Stable Diffusion version 1.5 diffusers model (4.27 GB)
-  source: runwayml/stable-diffusion-v1-5
+  repo_id: runwayml/stable-diffusion-v1-5
   recommended: True
   default: True
 sd-1/main/stable-diffusion-v1-5-inpainting:
   description: RunwayML SD 1.5 model optimized for inpainting, diffusers version (4.27 GB)
-  source: runwayml/stable-diffusion-inpainting
+  repo_id: runwayml/stable-diffusion-inpainting
   recommended: True
 sd-2/main/stable-diffusion-2-1:
   description: Stable Diffusion version 2.1 diffusers model, trained on 768 pixel images (5.21 GB)
-  source: stabilityai/stable-diffusion-2-1
+  repo_id: stabilityai/stable-diffusion-2-1
   recommended: False
 sd-2/main/stable-diffusion-2-inpainting:
   description: Stable Diffusion version 2.0 inpainting model (5.21 GB)
-  source: stabilityai/stable-diffusion-2-inpainting
+  repo_id: stabilityai/stable-diffusion-2-inpainting
   recommended: False
 sdxl/main/stable-diffusion-xl-base-1-0:
   description: Stable Diffusion XL base model (12 GB)
-  source: stabilityai/stable-diffusion-xl-base-1.0
+  repo_id: stabilityai/stable-diffusion-xl-base-1.0
   recommended: True
 sdxl-refiner/main/stable-diffusion-xl-refiner-1-0:
   description: Stable Diffusion XL refiner model (12 GB)
-  source: stabilityai/stable-diffusion-xl-refiner-1.0
+  repo_id: stabilityai/stable-diffusion-xl-refiner-1.0
   recommended: False
-sdxl/vae/sdxl-vae-fp16-fix:
-  description: Version of the SDXL-1.0 VAE that works in half precision mode
-  source: madebyollin/sdxl-vae-fp16-fix
+sdxl/vae/sdxl-1-0-vae-fix:
+  description: Fine tuned version of the SDXL-1.0 VAE
+  repo_id: madebyollin/sdxl-vae-fp16-fix
   recommended: True
 sd-1/main/Analog-Diffusion:
   description: An SD-1.5 model trained on diverse analog photographs (2.13 GB)
-  source: wavymulder/Analog-Diffusion
+  repo_id: wavymulder/Analog-Diffusion
   recommended: False
-sd-1/main/Deliberate:
+sd-1/main/Deliberate_v5:
   description: Versatile model that produces detailed images up to 768px (4.27 GB)
-  source: XpucT/Deliberate
+  path: https://huggingface.co/XpucT/Deliberate/resolve/main/Deliberate_v5.safetensors
   recommended: False
 sd-1/main/Dungeons-and-Diffusion:
   description: Dungeons & Dragons characters (2.13 GB)
-  source: 0xJustin/Dungeons-and-Diffusion
+  repo_id: 0xJustin/Dungeons-and-Diffusion
   recommended: False
 sd-1/main/dreamlike-photoreal-2:
   description: A photorealistic model trained on 768 pixel images based on SD 1.5 (2.13 GB)
-  source: dreamlike-art/dreamlike-photoreal-2.0
+  repo_id: dreamlike-art/dreamlike-photoreal-2.0
   recommended: False
 sd-1/main/Inkpunk-Diffusion:
   description: Stylized illustrations inspired by Gorillaz, FLCL and Shinkawa; prompt with "nvinkpunk" (4.27 GB)
-  source: Envvi/Inkpunk-Diffusion
+  repo_id: Envvi/Inkpunk-Diffusion
   recommended: False
 sd-1/main/openjourney:
   description: An SD 1.5 model fine tuned on Midjourney; prompt with "mdjrny-v4 style" (2.13 GB)
-  source: prompthero/openjourney
+  repo_id: prompthero/openjourney
   recommended: False
 sd-1/main/seek.art_MEGA:
-  source: coreco/seek.art_MEGA
+  repo_id: coreco/seek.art_MEGA
   description: A general use SD-1.5 "anything" model that supports multiple styles (2.1 GB)
   recommended: False
 sd-1/main/trinart_stable_diffusion_v2:
   description: An SD-1.5 model finetuned with ~40K assorted high resolution manga/anime-style images (2.13 GB)
-  source: naclbit/trinart_stable_diffusion_v2
+  repo_id: naclbit/trinart_stable_diffusion_v2
   recommended: False
 sd-1/controlnet/qrcode_monster:
-  source: monster-labs/control_v1p_sd15_qrcode_monster
+  repo_id: monster-labs/control_v1p_sd15_qrcode_monster
   subfolder: v2
 sd-1/controlnet/canny:
-  source: lllyasviel/control_v11p_sd15_canny
+  repo_id: lllyasviel/control_v11p_sd15_canny
   recommended: True
 sd-1/controlnet/inpaint:
-  source: lllyasviel/control_v11p_sd15_inpaint
+  repo_id: lllyasviel/control_v11p_sd15_inpaint
 sd-1/controlnet/mlsd:
-  source: lllyasviel/control_v11p_sd15_mlsd
+  repo_id: lllyasviel/control_v11p_sd15_mlsd
 sd-1/controlnet/depth:
-  source: lllyasviel/control_v11f1p_sd15_depth
+  repo_id: lllyasviel/control_v11f1p_sd15_depth
   recommended: True
 sd-1/controlnet/normal_bae:
-  source: lllyasviel/control_v11p_sd15_normalbae
+  repo_id: lllyasviel/control_v11p_sd15_normalbae
 sd-1/controlnet/seg:
-  source: lllyasviel/control_v11p_sd15_seg
+  repo_id: lllyasviel/control_v11p_sd15_seg
 sd-1/controlnet/lineart:
-  source: lllyasviel/control_v11p_sd15_lineart
+  repo_id: lllyasviel/control_v11p_sd15_lineart
   recommended: True
 sd-1/controlnet/lineart_anime:
-  source: lllyasviel/control_v11p_sd15s2_lineart_anime
+  repo_id: lllyasviel/control_v11p_sd15s2_lineart_anime
 sd-1/controlnet/openpose:
-  source: lllyasviel/control_v11p_sd15_openpose
+  repo_id: lllyasviel/control_v11p_sd15_openpose
   recommended: True
 sd-1/controlnet/scribble:
-  source: lllyasviel/control_v11p_sd15_scribble
+  repo_id: lllyasviel/control_v11p_sd15_scribble
   recommended: False
 sd-1/controlnet/softedge:
-  source: lllyasviel/control_v11p_sd15_softedge
+  repo_id: lllyasviel/control_v11p_sd15_softedge
 sd-1/controlnet/shuffle:
-  source: lllyasviel/control_v11e_sd15_shuffle
+  repo_id: lllyasviel/control_v11e_sd15_shuffle
 sd-1/controlnet/tile:
-  source: lllyasviel/control_v11f1e_sd15_tile
+  repo_id: lllyasviel/control_v11f1e_sd15_tile
 sd-1/controlnet/ip2p:
-  source: lllyasviel/control_v11e_sd15_ip2p
+  repo_id: lllyasviel/control_v11e_sd15_ip2p
 sd-1/t2i_adapter/canny-sd15:
-  source: TencentARC/t2iadapter_canny_sd15v2
+  repo_id: TencentARC/t2iadapter_canny_sd15v2
 sd-1/t2i_adapter/sketch-sd15:
-  source: TencentARC/t2iadapter_sketch_sd15v2
+  repo_id: TencentARC/t2iadapter_sketch_sd15v2
 sd-1/t2i_adapter/depth-sd15:
-  source: TencentARC/t2iadapter_depth_sd15v2
+  repo_id: TencentARC/t2iadapter_depth_sd15v2
 sd-1/t2i_adapter/zoedepth-sd15:
-  source: TencentARC/t2iadapter_zoedepth_sd15v1
+  repo_id: TencentARC/t2iadapter_zoedepth_sd15v1
 sdxl/t2i_adapter/canny-sdxl:
-  source: TencentARC/t2i-adapter-canny-sdxl-1.0
+  repo_id: TencentARC/t2i-adapter-canny-sdxl-1.0
 sdxl/t2i_adapter/zoedepth-sdxl:
-  source: TencentARC/t2i-adapter-depth-zoe-sdxl-1.0
+  repo_id: TencentARC/t2i-adapter-depth-zoe-sdxl-1.0
 sdxl/t2i_adapter/lineart-sdxl:
-  source: TencentARC/t2i-adapter-lineart-sdxl-1.0
+  repo_id: TencentARC/t2i-adapter-lineart-sdxl-1.0
 sdxl/t2i_adapter/sketch-sdxl:
-  source: TencentARC/t2i-adapter-sketch-sdxl-1.0
+  repo_id: TencentARC/t2i-adapter-sketch-sdxl-1.0
 sd-1/embedding/EasyNegative:
-  source: https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors
+  path: https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors
   recommended: True
-  description: A textual inversion to use in the negative prompt to reduce bad anatomy
-sd-1/lora/FlatColor:
-  source: https://civitai.com/models/6433/loraflatcolor
-  recommended: True
-  description: A LoRA that generates scenery using solid blocks of color
+sd-1/embedding/ahx-beta-453407d:
+  repo_id: sd-concepts-library/ahx-beta-453407d
 sd-1/lora/Ink scenery:
-  source: https://civitai.com/api/download/models/83390
-  description: Generate india ink-like landscapes
+  path: https://civitai.com/api/download/models/83390
 sd-1/ip_adapter/ip_adapter_sd15:
-  source: InvokeAI/ip_adapter_sd15
+  repo_id: InvokeAI/ip_adapter_sd15
   recommended: True
   requires:
     - InvokeAI/ip_adapter_sd_image_encoder
   description: IP-Adapter for SD 1.5 models
 sd-1/ip_adapter/ip_adapter_plus_sd15:
-  source: InvokeAI/ip_adapter_plus_sd15
+  repo_id: InvokeAI/ip_adapter_plus_sd15
   recommended: False
   requires:
     - InvokeAI/ip_adapter_sd_image_encoder
   description: Refined IP-Adapter for SD 1.5 models
 sd-1/ip_adapter/ip_adapter_plus_face_sd15:
-  source: InvokeAI/ip_adapter_plus_face_sd15
+  repo_id: InvokeAI/ip_adapter_plus_face_sd15
   recommended: False
   requires:
     - InvokeAI/ip_adapter_sd_image_encoder
   description: Refined IP-Adapter for SD 1.5 models, adapted for faces
 sdxl/ip_adapter/ip_adapter_sdxl:
-  source: InvokeAI/ip_adapter_sdxl
+  repo_id: InvokeAI/ip_adapter_sdxl
   recommended: False
   requires:
     - InvokeAI/ip_adapter_sdxl_image_encoder
   description: IP-Adapter for SDXL models
 any/clip_vision/ip_adapter_sd_image_encoder:
-  source: InvokeAI/ip_adapter_sd_image_encoder
+  repo_id: InvokeAI/ip_adapter_sd_image_encoder
   recommended: False
   description: Required model for using IP-Adapters with SD-1/2 models
|
||||||
any/clip_vision/ip_adapter_sdxl_image_encoder:
|
any/clip_vision/ip_adapter_sdxl_image_encoder:
|
||||||
source: InvokeAI/ip_adapter_sdxl_image_encoder
|
repo_id: InvokeAI/ip_adapter_sdxl_image_encoder
|
||||||
recommended: False
|
recommended: False
|
||||||
description: Required model for using IP-Adapters with SDXL models
|
description: Required model for using IP-Adapters with SDXL models
|
@@ -6,47 +6,45 @@
 
 """
 This is the npyscreen frontend to the model installation application.
-The work is actually done in backend code in model_install_backend.py.
+It is currently named model_install2.py, but will ultimately replace model_install.py.
 """
 
 import argparse
 import curses
-import logging
 import sys
-import textwrap
 import traceback
+import warnings
 from argparse import Namespace
-from multiprocessing import Process
-from multiprocessing.connection import Connection, Pipe
-from pathlib import Path
 from shutil import get_terminal_size
-from typing import Optional
+from typing import Any, Dict, List, Optional, Set
 
 import npyscreen
 import torch
 from npyscreen import widget
 
 from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.backend.install.model_install_backend import InstallSelections, ModelInstall, SchedulerPredictionType
-from invokeai.backend.model_management import ModelManager, ModelType
+from invokeai.app.services.model_install import ModelInstallServiceBase
+from invokeai.backend.install.install_helper import InstallHelper, InstallSelections, UnifiedModelInfo
+from invokeai.backend.model_manager import ModelType
 from invokeai.backend.util import choose_precision, choose_torch_device
 from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.frontend.install.widgets import (
     MIN_COLS,
     MIN_LINES,
-    BufferBox,
     CenteredTitleText,
     CyclingForm,
     MultiSelectColumns,
     SingleSelectColumns,
     TextBox,
     WindowTooSmallException,
-    select_stable_diffusion_config_file,
     set_min_terminal_size,
 )
 
+warnings.filterwarnings("ignore", category=UserWarning)  # noqa: E402
 config = InvokeAIAppConfig.get_config()
-logger = InvokeAILogger.get_logger()
+logger = InvokeAILogger.get_logger("ModelInstallService")
+logger.setLevel("WARNING")
+# logger.setLevel('DEBUG')
 
 # build a table mapping all non-printable characters to None
 # for stripping control characters
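
The import hunk tells most of the story: the multiprocessing plumbing (Process, Pipe, Connection) and the old ModelInstall backend drop out, and the TUI instead reaches the new install service through InstallHelper. The wiring, assembled from calls that appear verbatim later in this diff:

    from invokeai.app.services.config import InvokeAIAppConfig
    from invokeai.backend.install.install_helper import InstallHelper
    from invokeai.backend.util.logging import InvokeAILogger

    config = InvokeAIAppConfig.get_config()
    logger = InvokeAILogger.get_logger("ModelInstallService")

    # InstallHelper owns a ModelInstallServiceBase; the form reaches it via
    # parentApp.install_helper.installer rather than spawning a subprocess.
    install_helper = InstallHelper(config, logger)
    installer = install_helper.installer
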
@@ -58,44 +56,42 @@ MAX_OTHER_MODELS = 72
 
 
 def make_printable(s: str) -> str:
-    """Replace non-printable characters in a string"""
+    """Replace non-printable characters in a string."""
    return s.translate(NOPRINT_TRANS_TABLE)
 
 
 class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
+    """Main form for interactive TUI."""
 
     # for responsive resizing set to False, but this seems to cause a crash!
     FIX_MINIMUM_SIZE_WHEN_CREATED = True
 
     # for persistence
     current_tab = 0
 
-    def __init__(self, parentApp, name, multipage=False, *args, **keywords):
+    def __init__(self, parentApp: npyscreen.NPSAppManaged, name: str, multipage: bool = False, **keywords: Any):
         self.multipage = multipage
         self.subprocess = None
-        super().__init__(parentApp=parentApp, name=name, *args, **keywords)  # noqa: B026 # TODO: maybe this is bad?
+        super().__init__(parentApp=parentApp, name=name, **keywords)
 
-    def create(self):
+    def create(self) -> None:
+        self.installer = self.parentApp.install_helper.installer
+        self.model_labels = self._get_model_labels()
         self.keypress_timeout = 10
         self.counter = 0
         self.subprocess_connection = None
 
-        if not config.model_conf_path.exists():
-            with open(config.model_conf_path, "w") as file:
-                print("# InvokeAI model configuration file", file=file)
-        self.installer = ModelInstall(config)
-        self.all_models = self.installer.all_models()
-        self.starter_models = self.installer.starter_models()
-        self.model_labels = self._get_model_labels()
         window_width, window_height = get_terminal_size()
 
-        self.nextrely -= 1
+        # npyscreen has no typing hints
+        self.nextrely -= 1  # type: ignore
         self.add_widget_intelligent(
             npyscreen.FixedText,
             value="Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields. Cursor keys navigate, and <space> selects.",
             editable=False,
             color="CAUTION",
         )
-        self.nextrely += 1
+        self.nextrely += 1  # type: ignore
         self.tabs = self.add_widget_intelligent(
             SingleSelectColumns,
             values=[
@@ -115,9 +111,9 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         )
         self.tabs.on_changed = self._toggle_tables
 
-        top_of_table = self.nextrely
+        top_of_table = self.nextrely  # type: ignore
         self.starter_pipelines = self.add_starter_pipelines()
-        bottom_of_table = self.nextrely
+        bottom_of_table = self.nextrely  # type: ignore
 
         self.nextrely = top_of_table
         self.pipeline_models = self.add_pipeline_widgets(
@@ -162,15 +158,7 @@
 
         self.nextrely = bottom_of_table + 1
 
-        self.monitor = self.add_widget_intelligent(
-            BufferBox,
-            name="Log Messages",
-            editable=False,
-            max_height=6,
-        )
-
         self.nextrely += 1
-        done_label = "APPLY CHANGES"
         back_label = "BACK"
         cancel_label = "CANCEL"
         current_position = self.nextrely
@@ -186,14 +174,8 @@
             npyscreen.ButtonPress, name=cancel_label, when_pressed_function=self.on_cancel
         )
         self.nextrely = current_position
-        self.ok_button = self.add_widget_intelligent(
-            npyscreen.ButtonPress,
-            name=done_label,
-            relx=(window_width - len(done_label)) // 2,
-            when_pressed_function=self.on_execute,
-        )
 
-        label = "APPLY CHANGES & EXIT"
+        label = "APPLY CHANGES"
         self.nextrely = current_position
         self.done = self.add_widget_intelligent(
             npyscreen.ButtonPress,
@@ -210,17 +192,16 @@
     ############# diffusers tab ##########
     def add_starter_pipelines(self) -> dict[str, npyscreen.widget]:
         """Add widgets responsible for selecting diffusers models"""
-        widgets = {}
-        models = self.all_models
-        starters = self.starter_models
-        starter_model_labels = self.model_labels
+        widgets: Dict[str, npyscreen.widget] = {}
 
-        self.installed_models = sorted([x for x in starters if models[x].installed])
+        all_models = self.all_models  # master dict of all models, indexed by key
+        model_list = [x for x in self.starter_models if all_models[x].type in ["main", "vae"]]
+        model_labels = [self.model_labels[x] for x in model_list]
 
         widgets.update(
             label1=self.add_widget_intelligent(
                 CenteredTitleText,
-                name="Select from a starter set of Stable Diffusion models from HuggingFace.",
+                name="Select from a starter set of Stable Diffusion models from HuggingFace and Civitae.",
                 editable=False,
                 labelColor="CAUTION",
             )
@@ -230,23 +211,24 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         # if user has already installed some initial models, then don't patronize them
         # by showing more recommendations
         show_recommended = len(self.installed_models) == 0
-        keys = [x for x in models.keys() if x in starters]
+        checked = [
+            model_list.index(x)
+            for x in model_list
+            if (show_recommended and all_models[x].recommended) or all_models[x].installed
+        ]
         widgets.update(
             models_selected=self.add_widget_intelligent(
                 MultiSelectColumns,
                 columns=1,
                 name="Install Starter Models",
-                values=[starter_model_labels[x] for x in keys],
-                value=[
-                    keys.index(x)
-                    for x in keys
-                    if (show_recommended and models[x].recommended) or (x in self.installed_models)
-                ],
-                max_height=len(starters) + 1,
+                values=model_labels,
+                value=checked,
+                max_height=len(model_list) + 1,
                 relx=4,
                 scroll_exit=True,
             ),
-            models=keys,
+            models=model_list,
         )
 
         self.nextrely += 1
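
The new checked list is precomputed because MultiSelectColumns wants positional indices into its values list rather than model keys. A self-contained toy run of the same comprehension, with hypothetical model keys:

    model_list = ["sd-1/main/stable-diffusion-v1-5", "sd-1/vae/vae-ft-mse", "sd-1/main/dreamshaper-8"]
    recommended = {"sd-1/main/stable-diffusion-v1-5"}
    installed = {"sd-1/vae/vae-ft-mse"}
    show_recommended = len(installed) == 0  # False here: something is already installed

    checked = [
        model_list.index(x)
        for x in model_list
        if (show_recommended and x in recommended) or x in installed
    ]
    assert checked == [1]  # only the already-installed model is pre-checked

Since show_recommended turns False as soon as anything is installed, returning users are not re-checked into the recommended set, which is what the comment above the hunk promises.
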
@@ -257,14 +239,18 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         self,
         model_type: ModelType,
         window_width: int = 120,
-        install_prompt: str = None,
-        exclude: set = None,
+        install_prompt: Optional[str] = None,
+        exclude: Optional[Set[str]] = None,
     ) -> dict[str, npyscreen.widget]:
         """Generic code to create model selection widgets"""
         if exclude is None:
             exclude = set()
-        widgets = {}
-        model_list = [x for x in self.all_models if self.all_models[x].model_type == model_type and x not in exclude]
+        widgets: Dict[str, npyscreen.widget] = {}
+        all_models = self.all_models
+        model_list = sorted(
+            [x for x in all_models if all_models[x].type == model_type and x not in exclude],
+            key=lambda x: all_models[x].name or "",
+        )
         model_labels = [self.model_labels[x] for x in model_list]
 
         show_recommended = len(self.installed_models) == 0
@@ -300,7 +286,7 @@
                 value=[
                     model_list.index(x)
                     for x in model_list
-                    if (show_recommended and self.all_models[x].recommended) or self.all_models[x].installed
+                    if (show_recommended and all_models[x].recommended) or all_models[x].installed
                 ],
                 max_height=len(model_list) // columns + 1,
                 relx=4,
@@ -324,7 +310,7 @@
             download_ids=self.add_widget_intelligent(
                 TextBox,
                 name="Additional URLs, or HuggingFace repo_ids to install (Space separated. Use shift-control-V to paste):",
-                max_height=4,
+                max_height=6,
                 scroll_exit=True,
                 editable=True,
             )
@@ -349,13 +335,13 @@
 
         return widgets
 
-    def resize(self):
+    def resize(self) -> None:
         super().resize()
         if s := self.starter_pipelines.get("models_selected"):
-            keys = [x for x in self.all_models.keys() if x in self.starter_models]
-            s.values = [self.model_labels[x] for x in keys]
+            if model_list := self.starter_pipelines.get("models"):
+                s.values = [self.model_labels[x] for x in model_list]
 
-    def _toggle_tables(self, value=None):
+    def _toggle_tables(self, value: List[int]) -> None:
         selected_tab = value[0]
         widgets = [
             self.starter_pipelines,
@@ -385,17 +371,18 @@
         self.display()
 
     def _get_model_labels(self) -> dict[str, str]:
+        """Return a list of trimmed labels for all models."""
         window_width, window_height = get_terminal_size()
         checkbox_width = 4
         spacing_width = 2
+        result = {}
 
         models = self.all_models
-        label_width = max([len(models[x].name) for x in models])
+        label_width = max([len(models[x].name or "") for x in self.starter_models])
         description_width = window_width - label_width - checkbox_width - spacing_width
 
-        result = {}
-        for x in models.keys():
-            description = models[x].description
+        for key in self.all_models:
+            description = models[key].description
             description = (
                 description[0 : description_width - 3] + "..."
                 if description and len(description) > description_width
@@ -403,7 +390,8 @@
                 if description
                 else ""
             )
-            result[x] = f"%-{label_width}s %s" % (models[x].name, description)
+            result[key] = f"%-{label_width}s %s" % (models[key].name, description)
 
         return result
 
     def _get_columns(self) -> int:
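
The label format in this hunk is easy to misread: an f-string first bakes label_width into an old-style %-format, and that format then left-justifies every model name to a common column so the descriptions line up. A standalone demonstration of the same two-step formatting:

    names = ["canny", "openpose"]
    label_width = max(len(n) for n in names)  # 8
    for name, desc in [("canny", "edge maps"), ("openpose", "pose detection")]:
        # f"%-{label_width}s %s" becomes "%-8s %s" here, so names pad to 8 chars
        print(f"%-{label_width}s %s" % (name, desc))
    # canny    edge maps
    # openpose pose detection
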
@@ -413,50 +401,40 @@
 
     def confirm_deletions(self, selections: InstallSelections) -> bool:
         remove_models = selections.remove_models
-        if len(remove_models) > 0:
-            mods = "\n".join([ModelManager.parse_key(x)[0] for x in remove_models])
-            return npyscreen.notify_ok_cancel(
+        if remove_models:
+            model_names = [self.all_models[x].name or "" for x in remove_models]
+            mods = "\n".join(model_names)
+            is_ok = npyscreen.notify_ok_cancel(
                 f"These unchecked models will be deleted from disk. Continue?\n---------\n{mods}"
             )
+            assert isinstance(is_ok, bool)  # npyscreen doesn't have return type annotations
+            return is_ok
         else:
             return True
 
-    def on_execute(self):
-        self.marshall_arguments()
-        app = self.parentApp
-        if not self.confirm_deletions(app.install_selections):
-            return
+    @property
+    def all_models(self) -> Dict[str, UnifiedModelInfo]:
+        # npyscreen doesn't having typing hints
+        return self.parentApp.install_helper.all_models  # type: ignore
 
-        self.monitor.entry_widget.buffer(["Processing..."], scroll_end=True)
-        self.ok_button.hidden = True
-        self.display()
+    @property
+    def starter_models(self) -> List[str]:
+        return self.parentApp.install_helper._starter_models  # type: ignore
 
-        # TO DO: Spawn a worker thread, not a subprocess
-        parent_conn, child_conn = Pipe()
-        p = Process(
-            target=process_and_execute,
-            kwargs={
-                "opt": app.program_opts,
-                "selections": app.install_selections,
-                "conn_out": child_conn,
-            },
-        )
-        p.start()
-        child_conn.close()
-        self.subprocess_connection = parent_conn
-        self.subprocess = p
-        app.install_selections = InstallSelections()
+    @property
+    def installed_models(self) -> List[str]:
+        return self.parentApp.install_helper._installed_models  # type: ignore
 
-    def on_back(self):
+    def on_back(self) -> None:
         self.parentApp.switchFormPrevious()
         self.editing = False
 
-    def on_cancel(self):
+    def on_cancel(self) -> None:
         self.parentApp.setNextForm(None)
         self.parentApp.user_cancelled = True
         self.editing = False
 
-    def on_done(self):
+    def on_done(self) -> None:
         self.marshall_arguments()
         if not self.confirm_deletions(self.parentApp.install_selections):
             return
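
The three properties replace instance attributes that create() used to build once at startup; model state now lives in the shared InstallHelper and is re-read on every access, so the form cannot hold stale copies after an install or delete. A toy sketch of the delegation shape, with stand-in classes rather than the real npyscreen and InvokeAI types:

    class Helper:
        def __init__(self) -> None:
            self.all_models = {"sd-1/main/example": object()}


    class Parent:
        def __init__(self) -> None:
            self.install_helper = Helper()


    class Form:
        def __init__(self, parent: Parent) -> None:
            self.parentApp = parent

        @property
        def all_models(self):
            # always reflects the helper's current state
            return self.parentApp.install_helper.all_models


    form = Form(Parent())
    assert "sd-1/main/example" in form.all_models
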
@@ -464,77 +442,7 @@
         self.parentApp.user_cancelled = False
         self.editing = False
 
-    ########## This routine monitors the child process that is performing model installation and removal #####
-    def while_waiting(self):
-        """Called during idle periods. Main task is to update the Log Messages box with messages
-        from the child process that does the actual installation/removal"""
-        c = self.subprocess_connection
-        if not c:
-            return
-
-        monitor_widget = self.monitor.entry_widget
-        while c.poll():
-            try:
-                data = c.recv_bytes().decode("utf-8")
-                data.strip("\n")
-
-                # processing child is requesting user input to select the
-                # right configuration file
-                if data.startswith("*need v2 config"):
-                    _, model_path, *_ = data.split(":", 2)
-                    self._return_v2_config(model_path)
-
-                # processing child is done
-                elif data == "*done*":
-                    self._close_subprocess_and_regenerate_form()
-                    break
-
-                # update the log message box
-                else:
-                    data = make_printable(data)
-                    data = data.replace("[A", "")
-                    monitor_widget.buffer(
-                        textwrap.wrap(
-                            data,
-                            width=monitor_widget.width,
-                            subsequent_indent="   ",
-                        ),
-                        scroll_end=True,
-                    )
-                    self.display()
-            except (EOFError, OSError):
-                self.subprocess_connection = None
-
-    def _return_v2_config(self, model_path: str):
-        c = self.subprocess_connection
-        model_name = Path(model_path).name
-        message = select_stable_diffusion_config_file(model_name=model_name)
-        c.send_bytes(message.encode("utf-8"))
-
-    def _close_subprocess_and_regenerate_form(self):
-        app = self.parentApp
-        self.subprocess_connection.close()
-        self.subprocess_connection = None
-        self.monitor.entry_widget.buffer(["** Action Complete **"])
-        self.display()
-
-        # rebuild the form, saving and restoring some of the fields that need to be preserved.
-        saved_messages = self.monitor.entry_widget.values
-
-        app.main_form = app.addForm(
-            "MAIN",
-            addModelsForm,
-            name="Install Stable Diffusion Models",
-            multipage=self.multipage,
-        )
-        app.switchForm("MAIN")
-
-        app.main_form.monitor.entry_widget.values = saved_messages
-        app.main_form.monitor.entry_widget.buffer([""], scroll_end=True)
-        # app.main_form.pipeline_models['autoload_directory'].value = autoload_dir
-        # app.main_form.pipeline_models['autoscan_on_startup'].value = autoscan
-
-    def marshall_arguments(self):
+    def marshall_arguments(self) -> None:
         """
         Assemble arguments and store as attributes of the application:
         .starter_models: dict of model names to install from INITIAL_CONFIGURE.yaml
@@ -564,46 +472,24 @@
                 models_to_install = [x for x in selected if not self.all_models[x].installed]
                 models_to_remove = [x for x in section["models"] if x not in selected and self.all_models[x].installed]
                 selections.remove_models.extend(models_to_remove)
-                selections.install_models.extend(
-                    all_models[x].path or all_models[x].repo_id
-                    for x in models_to_install
-                    if all_models[x].path or all_models[x].repo_id
-                )
+                selections.install_models.extend([all_models[x] for x in models_to_install])
 
         # models located in the 'download_ids" section
         for section in ui_sections:
             if downloads := section.get("download_ids"):
-                selections.install_models.extend(downloads.value.split())
-
-        # NOT NEEDED - DONE IN BACKEND NOW
-        # # special case for the ipadapter_models. If any of the adapters are
-        # # chosen, then we add the corresponding encoder(s) to the install list.
-        # section = self.ipadapter_models
-        # if section.get("models_selected"):
-        #    selected_adapters = [
-        #        self.all_models[section["models"][x]].name for x in section.get("models_selected").value
-        #    ]
-        #    encoders = []
-        #    if any(["sdxl" in x for x in selected_adapters]):
-        #        encoders.append("ip_adapter_sdxl_image_encoder")
-        #    if any(["sd15" in x for x in selected_adapters]):
-        #        encoders.append("ip_adapter_sd_image_encoder")
-        #    for encoder in encoders:
-        #        key = f"any/clip_vision/{encoder}"
-        #        repo_id = f"InvokeAI/{encoder}"
-        #        if key not in self.all_models:
-        #            selections.install_models.append(repo_id)
+                models = [UnifiedModelInfo(source=x) for x in downloads.value.split()]
+                selections.install_models.extend(models)
 
 
-class AddModelApplication(npyscreen.NPSAppManaged):
-    def __init__(self, opt):
+class AddModelApplication(npyscreen.NPSAppManaged):  # type: ignore
+    def __init__(self, opt: Namespace, install_helper: InstallHelper):
         super().__init__()
         self.program_opts = opt
         self.user_cancelled = False
-        # self.autoload_pending = True
         self.install_selections = InstallSelections()
+        self.install_helper = install_helper
 
-    def onStart(self):
+    def onStart(self) -> None:
         npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
         self.main_form = self.addForm(
             "MAIN",
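
marshall_arguments now hands the backend UnifiedModelInfo records instead of bare path-or-repo_id strings, so a URL pasted into the download box and a checked starter model flow through the same InstallSelections container. A hedged sketch using only names that appear in this diff (no UnifiedModelInfo fields other than source are assumed):

    from invokeai.backend.install.install_helper import InstallSelections, UnifiedModelInfo

    downloads = "runwayml/stable-diffusion-v1-5 https://civitai.com/api/download/models/83390"
    selections = InstallSelections(
        install_models=[UnifiedModelInfo(source=x) for x in downloads.split()],
        remove_models=[],
    )
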
@@ -613,138 +499,62 @@ class AddModelApplication(npyscreen.NPSAppManaged):
         )
 
 
-class StderrToMessage:
-    def __init__(self, connection: Connection):
-        self.connection = connection
-
-    def write(self, data: str):
-        self.connection.send_bytes(data.encode("utf-8"))
-
-    def flush(self):
-        pass
+def list_models(installer: ModelInstallServiceBase, model_type: ModelType):
+    """Print out all models of type model_type."""
+    models = installer.record_store.search_by_attr(model_type=model_type)
+    print(f"Installed models of type `{model_type}`:")
+    for model in models:
+        path = (config.models_path / model.path).resolve()
+        print(f"{model.name:40}{model.base.value:5}{model.type.value:8}{model.format.value:12}{path}")
 
 
 # --------------------------------------------------------
-def ask_user_for_prediction_type(model_path: Path, tui_conn: Connection = None) -> SchedulerPredictionType:
-    if tui_conn:
-        logger.debug("Waiting for user response...")
-        return _ask_user_for_pt_tui(model_path, tui_conn)
-    else:
-        return _ask_user_for_pt_cmdline(model_path)
-
-
-def _ask_user_for_pt_cmdline(model_path: Path) -> Optional[SchedulerPredictionType]:
-    choices = [SchedulerPredictionType.Epsilon, SchedulerPredictionType.VPrediction, None]
-    print(
-        f"""
-Please select the scheduler prediction type of the checkpoint named {model_path.name}:
-[1] "epsilon" - most v1.5 models and v2 models trained on 512 pixel images
-[2] "vprediction" - v2 models trained on 768 pixel images and a few v1.5 models
-[3] Accept the best guess; you can fix it in the Web UI later
-"""
-    )
-    choice = None
-    ok = False
-    while not ok:
-        try:
-            choice = input("select [3]> ").strip()
-            if not choice:
-                return None
-            choice = choices[int(choice) - 1]
-            ok = True
-        except (ValueError, IndexError):
-            print(f"{choice} is not a valid choice")
-        except EOFError:
-            return
-    return choice
-
-
-def _ask_user_for_pt_tui(model_path: Path, tui_conn: Connection) -> SchedulerPredictionType:
-    tui_conn.send_bytes(f"*need v2 config for:{model_path}".encode("utf-8"))
-    # note that we don't do any status checking here
-    response = tui_conn.recv_bytes().decode("utf-8")
-    if response is None:
-        return None
-    elif response == "epsilon":
-        return SchedulerPredictionType.epsilon
-    elif response == "v":
-        return SchedulerPredictionType.VPrediction
-    elif response == "guess":
-        return None
-    else:
-        return None
-
-
-# --------------------------------------------------------
-def process_and_execute(
-    opt: Namespace,
-    selections: InstallSelections,
-    conn_out: Connection = None,
-):
-    # need to reinitialize config in subprocess
-    config = InvokeAIAppConfig.get_config()
-    args = ["--root", opt.root] if opt.root else []
-    config.parse_args(args)
-
-    # set up so that stderr is sent to conn_out
-    if conn_out:
-        translator = StderrToMessage(conn_out)
-        sys.stderr = translator
-        sys.stdout = translator
-        logger = InvokeAILogger.get_logger()
-        logger.handlers.clear()
-        logger.addHandler(logging.StreamHandler(translator))
-
-    installer = ModelInstall(config, prediction_type_helper=lambda x: ask_user_for_prediction_type(x, conn_out))
-    installer.install(selections)
-
-    if conn_out:
-        conn_out.send_bytes("*done*".encode("utf-8"))
-        conn_out.close()
-
-
-# --------------------------------------------------------
-def select_and_download_models(opt: Namespace):
+def select_and_download_models(opt: Namespace) -> None:
+    """Prompt user for install/delete selections and execute."""
     precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device()))
-    config.precision = precision
-    installer = ModelInstall(config, prediction_type_helper=ask_user_for_prediction_type)
+    # unsure how to avoid a typing complaint in the next line: config.precision is an enumerated Literal
+    config.precision = precision  # type: ignore
+    install_helper = InstallHelper(config, logger)
+    installer = install_helper.installer
 
     if opt.list_models:
-        installer.list_models(opt.list_models)
+        list_models(installer, opt.list_models)
 
     elif opt.add or opt.delete:
-        selections = InstallSelections(install_models=opt.add or [], remove_models=opt.delete or [])
-        installer.install(selections)
+        selections = InstallSelections(
+            install_models=[UnifiedModelInfo(source=x) for x in (opt.add or [])], remove_models=opt.delete or []
+        )
+        install_helper.add_or_delete(selections)
 
     elif opt.default_only:
-        selections = InstallSelections(install_models=installer.default_model())
-        installer.install(selections)
+        default_model = install_helper.default_model()
+        assert default_model is not None
+        selections = InstallSelections(install_models=[default_model])
+        install_helper.add_or_delete(selections)
 
     elif opt.yes_to_all:
-        selections = InstallSelections(install_models=installer.recommended_models())
-        installer.install(selections)
+        selections = InstallSelections(install_models=install_helper.recommended_models())
+        install_helper.add_or_delete(selections)
 
     # this is where the TUI is called
     else:
-        # needed to support the probe() method running under a subprocess
-        torch.multiprocessing.set_start_method("spawn")
-
         if not set_min_terminal_size(MIN_COLS, MIN_LINES):
             raise WindowTooSmallException(
                 "Could not increase terminal size. Try running again with a larger window or smaller font size."
             )
 
-        installApp = AddModelApplication(opt)
+        installApp = AddModelApplication(opt, install_helper)
         try:
             installApp.run()
-        except KeyboardInterrupt as e:
-            if hasattr(installApp, "main_form"):
-                if installApp.main_form.subprocess and installApp.main_form.subprocess.is_alive():
-                    logger.info("Terminating subprocesses")
-                    installApp.main_form.subprocess.terminate()
-                    installApp.main_form.subprocess = None
-            raise e
-        process_and_execute(opt, installApp.install_selections)
+        except KeyboardInterrupt:
+            print("Aborted...")
+            sys.exit(-1)
+
+        install_helper.add_or_delete(installApp.install_selections)
 
 
 # -------------------------------------
-def main():
+def main() -> None:
     parser = argparse.ArgumentParser(description="InvokeAI model downloader")
     parser.add_argument(
         "--add",
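
Every headless branch above now funnels into install_helper.add_or_delete(selections), giving the flag-driven paths and the TUI a single execution point. A condensed, non-authoritative restatement for reference, using only names from this diff:

    from argparse import Namespace

    from invokeai.backend.install.install_helper import InstallHelper, InstallSelections, UnifiedModelInfo


    def run_headless(opt: Namespace, install_helper: InstallHelper) -> None:
        # a compressed view of the branches in select_and_download_models
        if opt.add or opt.delete:
            selections = InstallSelections(
                install_models=[UnifiedModelInfo(source=x) for x in (opt.add or [])],
                remove_models=opt.delete or [],
            )
        elif opt.default_only:
            default_model = install_helper.default_model()
            assert default_model is not None
            selections = InstallSelections(install_models=[default_model])
        elif opt.yes_to_all:
            selections = InstallSelections(install_models=install_helper.recommended_models())
        else:
            return
        install_helper.add_or_delete(selections)
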
@@ -754,7 +564,7 @@ def main():
     parser.add_argument(
         "--delete",
         nargs="*",
-        help="List of names of models to idelete",
+        help="List of names of models to delete. Use type:name to disambiguate, as in `controlnet:my_model`",
     )
     parser.add_argument(
         "--full-precision",
@@ -781,14 +591,6 @@
         choices=[x.value for x in ModelType],
         help="list installed models",
     )
-    parser.add_argument(
-        "--config_file",
-        "-c",
-        dest="config_file",
-        type=str,
-        default=None,
-        help="path to configuration file to create",
-    )
     parser.add_argument(
         "--root_dir",
         dest="root",
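
Besides fixing the old "idelete" typo, the --delete help now documents the type:name disambiguation syntax. A quick, hypothetical smoke test of just that flag:

    import argparse

    parser = argparse.ArgumentParser(description="InvokeAI model downloader")
    parser.add_argument(
        "--delete",
        nargs="*",
        help="List of names of models to delete. Use type:name to disambiguate, as in `controlnet:my_model`",
    )
    opt = parser.parse_args(["--delete", "controlnet:my_model", "lora:my_lora"])
    assert opt.delete == ["controlnet:my_model", "lora:my_lora"]
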
@@ -6,45 +6,47 @@
 
 """
 This is the npyscreen frontend to the model installation application.
-It is currently named model_install2.py, but will ultimately replace model_install.py.
+The work is actually done in backend code in model_install_backend.py.
 """
 
 import argparse
 import curses
+import logging
 import sys
+import textwrap
 import traceback
-import warnings
 from argparse import Namespace
+from multiprocessing import Process
+from multiprocessing.connection import Connection, Pipe
+from pathlib import Path
 from shutil import get_terminal_size
-from typing import Any, Dict, List, Optional, Set
+from typing import Optional
 
 import npyscreen
 import torch
 from npyscreen import widget
 
 from invokeai.app.services.config import InvokeAIAppConfig
-from invokeai.app.services.model_install import ModelInstallServiceBase
-from invokeai.backend.install.install_helper import InstallHelper, InstallSelections, UnifiedModelInfo
-from invokeai.backend.model_manager import ModelType
+from invokeai.backend.install.model_install_backend import InstallSelections, ModelInstall, SchedulerPredictionType
+from invokeai.backend.model_management import ModelManager, ModelType
 from invokeai.backend.util import choose_precision, choose_torch_device
 from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.frontend.install.widgets import (
     MIN_COLS,
     MIN_LINES,
+    BufferBox,
     CenteredTitleText,
     CyclingForm,
     MultiSelectColumns,
     SingleSelectColumns,
     TextBox,
     WindowTooSmallException,
+    select_stable_diffusion_config_file,
     set_min_terminal_size,
 )
 
-warnings.filterwarnings("ignore", category=UserWarning)  # noqa: E402
 config = InvokeAIAppConfig.get_config()
-logger = InvokeAILogger.get_logger("ModelInstallService")
-logger.setLevel("WARNING")
-# logger.setLevel('DEBUG')
+logger = InvokeAILogger.get_logger()
 
 # build a table mapping all non-printable characters to None
 # for stripping control characters
@@ -56,42 +58,44 @@ MAX_OTHER_MODELS = 72
 
 
 def make_printable(s: str) -> str:
-    """Replace non-printable characters in a string."""
+    """Replace non-printable characters in a string"""
     return s.translate(NOPRINT_TRANS_TABLE)
 
 
 class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
-    """Main form for interactive TUI."""
 
     # for responsive resizing set to False, but this seems to cause a crash!
     FIX_MINIMUM_SIZE_WHEN_CREATED = True
 
     # for persistence
     current_tab = 0
 
-    def __init__(self, parentApp: npyscreen.NPSAppManaged, name: str, multipage: bool = False, **keywords: Any):
+    def __init__(self, parentApp, name, multipage=False, *args, **keywords):
         self.multipage = multipage
         self.subprocess = None
-        super().__init__(parentApp=parentApp, name=name, **keywords)
+        super().__init__(parentApp=parentApp, name=name, *args, **keywords)  # noqa: B026 # TODO: maybe this is bad?
 
-    def create(self) -> None:
-        self.installer = self.parentApp.install_helper.installer
-        self.model_labels = self._get_model_labels()
+    def create(self):
         self.keypress_timeout = 10
         self.counter = 0
         self.subprocess_connection = None
 
+        if not config.model_conf_path.exists():
+            with open(config.model_conf_path, "w") as file:
+                print("# InvokeAI model configuration file", file=file)
+        self.installer = ModelInstall(config)
+        self.all_models = self.installer.all_models()
+        self.starter_models = self.installer.starter_models()
+        self.model_labels = self._get_model_labels()
         window_width, window_height = get_terminal_size()
 
-        # npyscreen has no typing hints
-        self.nextrely -= 1  # type: ignore
+        self.nextrely -= 1
         self.add_widget_intelligent(
             npyscreen.FixedText,
             value="Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields. Cursor keys navigate, and <space> selects.",
             editable=False,
             color="CAUTION",
         )
-        self.nextrely += 1  # type: ignore
+        self.nextrely += 1
         self.tabs = self.add_widget_intelligent(
             SingleSelectColumns,
             values=[
@@ -111,9 +115,9 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         )
         self.tabs.on_changed = self._toggle_tables
 
-        top_of_table = self.nextrely  # type: ignore
+        top_of_table = self.nextrely
         self.starter_pipelines = self.add_starter_pipelines()
-        bottom_of_table = self.nextrely  # type: ignore
+        bottom_of_table = self.nextrely
 
         self.nextrely = top_of_table
         self.pipeline_models = self.add_pipeline_widgets(
@@ -158,7 +162,15 @@
 
         self.nextrely = bottom_of_table + 1
 
+        self.monitor = self.add_widget_intelligent(
+            BufferBox,
+            name="Log Messages",
+            editable=False,
+            max_height=6,
+        )
+
         self.nextrely += 1
+        done_label = "APPLY CHANGES"
         back_label = "BACK"
         cancel_label = "CANCEL"
         current_position = self.nextrely
@@ -174,8 +186,14 @@
             npyscreen.ButtonPress, name=cancel_label, when_pressed_function=self.on_cancel
         )
         self.nextrely = current_position
+        self.ok_button = self.add_widget_intelligent(
+            npyscreen.ButtonPress,
+            name=done_label,
+            relx=(window_width - len(done_label)) // 2,
+            when_pressed_function=self.on_execute,
+        )
 
-        label = "APPLY CHANGES"
+        label = "APPLY CHANGES & EXIT"
         self.nextrely = current_position
         self.done = self.add_widget_intelligent(
             npyscreen.ButtonPress,
@@ -192,16 +210,17 @@
     ############# diffusers tab ##########
     def add_starter_pipelines(self) -> dict[str, npyscreen.widget]:
         """Add widgets responsible for selecting diffusers models"""
-        widgets: Dict[str, npyscreen.widget] = {}
+        widgets = {}
+        models = self.all_models
+        starters = self.starter_models
+        starter_model_labels = self.model_labels
 
-        all_models = self.all_models  # master dict of all models, indexed by key
-        model_list = [x for x in self.starter_models if all_models[x].type in ["main", "vae"]]
-        model_labels = [self.model_labels[x] for x in model_list]
+        self.installed_models = sorted([x for x in starters if models[x].installed])
 
         widgets.update(
             label1=self.add_widget_intelligent(
                 CenteredTitleText,
-                name="Select from a starter set of Stable Diffusion models from HuggingFace and Civitae.",
+                name="Select from a starter set of Stable Diffusion models from HuggingFace.",
                 editable=False,
                 labelColor="CAUTION",
             )
@@ -211,24 +230,23 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         # if user has already installed some initial models, then don't patronize them
         # by showing more recommendations
         show_recommended = len(self.installed_models) == 0
-        checked = [
-            model_list.index(x)
-            for x in model_list
-            if (show_recommended and all_models[x].recommended) or all_models[x].installed
-        ]
+        keys = [x for x in models.keys() if x in starters]
         widgets.update(
             models_selected=self.add_widget_intelligent(
                 MultiSelectColumns,
                 columns=1,
                 name="Install Starter Models",
-                values=model_labels,
-                value=checked,
-                max_height=len(model_list) + 1,
+                values=[starter_model_labels[x] for x in keys],
+                value=[
+                    keys.index(x)
+                    for x in keys
+                    if (show_recommended and models[x].recommended) or (x in self.installed_models)
+                ],
+                max_height=len(starters) + 1,
                 relx=4,
                 scroll_exit=True,
             ),
-            models=model_list,
+            models=keys,
         )
 
         self.nextrely += 1
@@ -239,18 +257,14 @@
         self,
         model_type: ModelType,
         window_width: int = 120,
-        install_prompt: Optional[str] = None,
-        exclude: Optional[Set[str]] = None,
+        install_prompt: str = None,
+        exclude: set = None,
     ) -> dict[str, npyscreen.widget]:
         """Generic code to create model selection widgets"""
         if exclude is None:
             exclude = set()
-        widgets: Dict[str, npyscreen.widget] = {}
-        all_models = self.all_models
-        model_list = sorted(
-            [x for x in all_models if all_models[x].type == model_type and x not in exclude],
-            key=lambda x: all_models[x].name or "",
-        )
+        widgets = {}
+        model_list = [x for x in self.all_models if self.all_models[x].model_type == model_type and x not in exclude]
         model_labels = [self.model_labels[x] for x in model_list]
 
         show_recommended = len(self.installed_models) == 0
@@ -286,7 +300,7 @@
                 value=[
                     model_list.index(x)
                     for x in model_list
-                    if (show_recommended and all_models[x].recommended) or all_models[x].installed
+                    if (show_recommended and self.all_models[x].recommended) or self.all_models[x].installed
                 ],
                 max_height=len(model_list) // columns + 1,
                 relx=4,
@@ -310,7 +324,7 @@
             download_ids=self.add_widget_intelligent(
                 TextBox,
                 name="Additional URLs, or HuggingFace repo_ids to install (Space separated. Use shift-control-V to paste):",
-                max_height=6,
+                max_height=4,
                 scroll_exit=True,
                 editable=True,
             )
@ -335,13 +349,13 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
|
|||||||
|
|
||||||
return widgets
|
return widgets
|
||||||
|
|
||||||
def resize(self) -> None:
|
def resize(self):
|
||||||
super().resize()
|
super().resize()
|
||||||
if s := self.starter_pipelines.get("models_selected"):
|
if s := self.starter_pipelines.get("models_selected"):
|
||||||
if model_list := self.starter_pipelines.get("models"):
|
keys = [x for x in self.all_models.keys() if x in self.starter_models]
|
||||||
s.values = [self.model_labels[x] for x in model_list]
|
s.values = [self.model_labels[x] for x in keys]
|
||||||
|
|
||||||
def _toggle_tables(self, value: List[int]) -> None:
|
def _toggle_tables(self, value=None):
|
||||||
selected_tab = value[0]
|
selected_tab = value[0]
|
||||||
widgets = [
|
widgets = [
|
||||||
self.starter_pipelines,
|
self.starter_pipelines,
|
||||||
@ -371,18 +385,17 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
|
|||||||
self.display()
|
self.display()
|
||||||
|
|
||||||
def _get_model_labels(self) -> dict[str, str]:
|
def _get_model_labels(self) -> dict[str, str]:
|
||||||
"""Return a list of trimmed labels for all models."""
|
|
||||||
window_width, window_height = get_terminal_size()
|
window_width, window_height = get_terminal_size()
|
||||||
checkbox_width = 4
|
checkbox_width = 4
|
||||||
spacing_width = 2
|
spacing_width = 2
|
||||||
result = {}
|
|
||||||
|
|
||||||
models = self.all_models
|
models = self.all_models
|
||||||
label_width = max([len(models[x].name or "") for x in self.starter_models])
|
label_width = max([len(models[x].name) for x in models])
|
||||||
description_width = window_width - label_width - checkbox_width - spacing_width
|
description_width = window_width - label_width - checkbox_width - spacing_width
|
||||||
|
|
||||||
for key in self.all_models:
|
result = {}
|
||||||
description = models[key].description
|
for x in models.keys():
|
||||||
|
description = models[x].description
|
||||||
description = (
|
description = (
|
||||||
description[0 : description_width - 3] + "..."
|
description[0 : description_width - 3] + "..."
|
||||||
if description and len(description) > description_width
|
if description and len(description) > description_width
|
||||||
@ -390,8 +403,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
|
|||||||
if description
|
if description
|
||||||
else ""
|
else ""
|
||||||
)
|
)
|
||||||
result[key] = f"%-{label_width}s %s" % (models[key].name, description)
|
result[x] = f"%-{label_width}s %s" % (models[x].name, description)
|
||||||
|
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def _get_columns(self) -> int:
|
def _get_columns(self) -> int:
|
||||||
@@ -401,40 +413,50 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
 
     def confirm_deletions(self, selections: InstallSelections) -> bool:
         remove_models = selections.remove_models
-        if remove_models:
-            model_names = [self.all_models[x].name or "" for x in remove_models]
-            mods = "\n".join(model_names)
-            is_ok = npyscreen.notify_ok_cancel(
+        if len(remove_models) > 0:
+            mods = "\n".join([ModelManager.parse_key(x)[0] for x in remove_models])
+            return npyscreen.notify_ok_cancel(
                 f"These unchecked models will be deleted from disk. Continue?\n---------\n{mods}"
             )
-            assert isinstance(is_ok, bool)  # npyscreen doesn't have return type annotations
-            return is_ok
         else:
             return True
 
-    @property
-    def all_models(self) -> Dict[str, UnifiedModelInfo]:
-        # npyscreen doesn't having typing hints
-        return self.parentApp.install_helper.all_models  # type: ignore
+    def on_execute(self):
+        self.marshall_arguments()
+        app = self.parentApp
+        if not self.confirm_deletions(app.install_selections):
+            return
 
-    @property
-    def starter_models(self) -> List[str]:
-        return self.parentApp.install_helper._starter_models  # type: ignore
+        self.monitor.entry_widget.buffer(["Processing..."], scroll_end=True)
+        self.ok_button.hidden = True
+        self.display()
 
-    @property
-    def installed_models(self) -> List[str]:
-        return self.parentApp.install_helper._installed_models  # type: ignore
+        # TO DO: Spawn a worker thread, not a subprocess
+        parent_conn, child_conn = Pipe()
+        p = Process(
+            target=process_and_execute,
+            kwargs={
+                "opt": app.program_opts,
+                "selections": app.install_selections,
+                "conn_out": child_conn,
+            },
+        )
+        p.start()
+        child_conn.close()
+        self.subprocess_connection = parent_conn
+        self.subprocess = p
+        app.install_selections = InstallSelections()
 
-    def on_back(self) -> None:
+    def on_back(self):
         self.parentApp.switchFormPrevious()
         self.editing = False
 
-    def on_cancel(self) -> None:
+    def on_cancel(self):
         self.parentApp.setNextForm(None)
         self.parentApp.user_cancelled = True
         self.editing = False
 
-    def on_done(self) -> None:
+    def on_done(self):
         self.marshall_arguments()
         if not self.confirm_deletions(self.parentApp.install_selections):
             return
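
Note: the on_execute() added above follows the standard multiprocessing hand-off: create a Pipe, give the child one end, close that end in the parent, and keep the other end for monitoring. A minimal sketch of the same pattern, with a stand-in worker in place of process_and_execute:

    from multiprocessing import Pipe, Process

    def worker(conn_out):
        # Stand-in for process_and_execute: report progress, then signal completion.
        conn_out.send_bytes(b"installing...")
        conn_out.send_bytes(b"*done*")
        conn_out.close()

    if __name__ == "__main__":
        parent_conn, child_conn = Pipe()
        p = Process(target=worker, kwargs={"conn_out": child_conn})
        p.start()
        child_conn.close()  # parent keeps only its own end of the pipe
        while (msg := parent_conn.recv_bytes().decode("utf-8")) != "*done*":
            print(msg)
        p.join()
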
@@ -442,7 +464,77 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         self.parentApp.user_cancelled = False
         self.editing = False
 
-    def marshall_arguments(self) -> None:
+    ########## This routine monitors the child process that is performing model installation and removal #####
+    def while_waiting(self):
+        """Called during idle periods. Main task is to update the Log Messages box with messages
+        from the child process that does the actual installation/removal"""
+        c = self.subprocess_connection
+        if not c:
+            return
+
+        monitor_widget = self.monitor.entry_widget
+        while c.poll():
+            try:
+                data = c.recv_bytes().decode("utf-8")
+                data.strip("\n")
+
+                # processing child is requesting user input to select the
+                # right configuration file
+                if data.startswith("*need v2 config"):
+                    _, model_path, *_ = data.split(":", 2)
+                    self._return_v2_config(model_path)
+
+                # processing child is done
+                elif data == "*done*":
+                    self._close_subprocess_and_regenerate_form()
+                    break
+
+                # update the log message box
+                else:
+                    data = make_printable(data)
+                    data = data.replace("[A", "")
+                    monitor_widget.buffer(
+                        textwrap.wrap(
+                            data,
+                            width=monitor_widget.width,
+                            subsequent_indent=" ",
+                        ),
+                        scroll_end=True,
+                    )
+                    self.display()
+            except (EOFError, OSError):
+                self.subprocess_connection = None
+
+    def _return_v2_config(self, model_path: str):
+        c = self.subprocess_connection
+        model_name = Path(model_path).name
+        message = select_stable_diffusion_config_file(model_name=model_name)
+        c.send_bytes(message.encode("utf-8"))
+
+    def _close_subprocess_and_regenerate_form(self):
+        app = self.parentApp
+        self.subprocess_connection.close()
+        self.subprocess_connection = None
+        self.monitor.entry_widget.buffer(["** Action Complete **"])
+        self.display()
+
+        # rebuild the form, saving and restoring some of the fields that need to be preserved.
+        saved_messages = self.monitor.entry_widget.values
+
+        app.main_form = app.addForm(
+            "MAIN",
+            addModelsForm,
+            name="Install Stable Diffusion Models",
+            multipage=self.multipage,
+        )
+        app.switchForm("MAIN")
+
+        app.main_form.monitor.entry_widget.values = saved_messages
+        app.main_form.monitor.entry_widget.buffer([""], scroll_end=True)
+        # app.main_form.pipeline_models['autoload_directory'].value = autoload_dir
+        # app.main_form.pipeline_models['autoscan_on_startup'].value = autoscan
+
+    def marshall_arguments(self):
         """
         Assemble arguments and store as attributes of the application:
         .starter_models: dict of model names to install from INITIAL_CONFIGURE.yaml
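
Note: while_waiting() above is a poll-and-dispatch loop over that pipe: Connection.poll() keeps the UI responsive by reading only what has already arrived, and in-band sentinel strings ("*need v2 config...", "*done*") multiplex the child's requests and completion signal over the same channel as its log text. A minimal sketch of the parent-side dispatch; the connection and the reply are illustrative:

    def drain(conn):
        """Handle everything currently queued on conn; return False once done."""
        while conn.poll():  # non-blocking check for pending data
            try:
                data = conn.recv_bytes().decode("utf-8")
            except (EOFError, OSError):
                return False  # child went away
            if data.startswith("*need v2 config"):
                _, model_path, *_ = data.split(":", 2)
                conn.send_bytes(b"epsilon")  # answer the child's question
            elif data == "*done*":
                return False
            else:
                print(data)  # ordinary log traffic
        return True
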
@@ -472,24 +564,46 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
             models_to_install = [x for x in selected if not self.all_models[x].installed]
             models_to_remove = [x for x in section["models"] if x not in selected and self.all_models[x].installed]
             selections.remove_models.extend(models_to_remove)
-            selections.install_models.extend([all_models[x] for x in models_to_install])
+            selections.install_models.extend(
+                all_models[x].path or all_models[x].repo_id
+                for x in models_to_install
+                if all_models[x].path or all_models[x].repo_id
+            )
 
         # models located in the 'download_ids" section
         for section in ui_sections:
             if downloads := section.get("download_ids"):
-                models = [UnifiedModelInfo(source=x) for x in downloads.value.split()]
-                selections.install_models.extend(models)
+                selections.install_models.extend(downloads.value.split())
+
+        # NOT NEEDED - DONE IN BACKEND NOW
+        # # special case for the ipadapter_models. If any of the adapters are
+        # # chosen, then we add the corresponding encoder(s) to the install list.
+        # section = self.ipadapter_models
+        # if section.get("models_selected"):
+        #     selected_adapters = [
+        #         self.all_models[section["models"][x]].name for x in section.get("models_selected").value
+        #     ]
+        #     encoders = []
+        #     if any(["sdxl" in x for x in selected_adapters]):
+        #         encoders.append("ip_adapter_sdxl_image_encoder")
+        #     if any(["sd15" in x for x in selected_adapters]):
+        #         encoders.append("ip_adapter_sd_image_encoder")
+        #     for encoder in encoders:
+        #         key = f"any/clip_vision/{encoder}"
+        #         repo_id = f"InvokeAI/{encoder}"
+        #         if key not in self.all_models:
+        #             selections.install_models.append(repo_id)
 
 
-class AddModelApplication(npyscreen.NPSAppManaged):  # type: ignore
-    def __init__(self, opt: Namespace, install_helper: InstallHelper):
+class AddModelApplication(npyscreen.NPSAppManaged):
+    def __init__(self, opt):
         super().__init__()
         self.program_opts = opt
         self.user_cancelled = False
+        # self.autoload_pending = True
         self.install_selections = InstallSelections()
-        self.install_helper = install_helper
 
-    def onStart(self) -> None:
+    def onStart(self):
         npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
         self.main_form = self.addForm(
             "MAIN",
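
Note: marshall_arguments() reduces each section of checkboxes to two set differences: checked-but-not-installed becomes the install list, installed-but-unchecked becomes the removal list. A minimal sketch of that split with illustrative data:

    installed = {"sd-1.5": True, "sdxl-base": False}  # name -> already installed?
    section_models = ["sd-1.5", "sdxl-base"]
    selected = {"sdxl-base"}                          # boxes the user left checked

    to_install = [m for m in selected if not installed[m]]
    to_remove = [m for m in section_models if m not in selected and installed[m]]
    assert to_install == ["sdxl-base"]
    assert to_remove == ["sd-1.5"]
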
@@ -499,62 +613,138 @@ class AddModelApplication(npyscreen.NPSAppManaged): # type: ignore
         )
 
 
-def list_models(installer: ModelInstallServiceBase, model_type: ModelType):
-    """Print out all models of type model_type."""
-    models = installer.record_store.search_by_attr(model_type=model_type)
-    print(f"Installed models of type `{model_type}`:")
-    for model in models:
-        path = (config.models_path / model.path).resolve()
-        print(f"{model.name:40}{model.base.value:5}{model.type.value:8}{model.format.value:12}{path}")
+class StderrToMessage:
+    def __init__(self, connection: Connection):
+        self.connection = connection
+
+    def write(self, data: str):
+        self.connection.send_bytes(data.encode("utf-8"))
+
+    def flush(self):
+        pass
 
 
 # --------------------------------------------------------
-def select_and_download_models(opt: Namespace) -> None:
-    """Prompt user for install/delete selections and execute."""
+def ask_user_for_prediction_type(model_path: Path, tui_conn: Connection = None) -> SchedulerPredictionType:
+    if tui_conn:
+        logger.debug("Waiting for user response...")
+        return _ask_user_for_pt_tui(model_path, tui_conn)
+    else:
+        return _ask_user_for_pt_cmdline(model_path)
+
+
+def _ask_user_for_pt_cmdline(model_path: Path) -> Optional[SchedulerPredictionType]:
+    choices = [SchedulerPredictionType.Epsilon, SchedulerPredictionType.VPrediction, None]
+    print(
+        f"""
+Please select the scheduler prediction type of the checkpoint named {model_path.name}:
+[1] "epsilon" - most v1.5 models and v2 models trained on 512 pixel images
+[2] "vprediction" - v2 models trained on 768 pixel images and a few v1.5 models
+[3] Accept the best guess; you can fix it in the Web UI later
+"""
+    )
+    choice = None
+    ok = False
+    while not ok:
+        try:
+            choice = input("select [3]> ").strip()
+            if not choice:
+                return None
+            choice = choices[int(choice) - 1]
+            ok = True
+        except (ValueError, IndexError):
+            print(f"{choice} is not a valid choice")
+        except EOFError:
+            return
+    return choice
+
+
+def _ask_user_for_pt_tui(model_path: Path, tui_conn: Connection) -> SchedulerPredictionType:
+    tui_conn.send_bytes(f"*need v2 config for:{model_path}".encode("utf-8"))
+    # note that we don't do any status checking here
+    response = tui_conn.recv_bytes().decode("utf-8")
+    if response is None:
+        return None
+    elif response == "epsilon":
+        return SchedulerPredictionType.epsilon
+    elif response == "v":
+        return SchedulerPredictionType.VPrediction
+    elif response == "guess":
+        return None
+    else:
+        return None
+
+
+# --------------------------------------------------------
+def process_and_execute(
+    opt: Namespace,
+    selections: InstallSelections,
+    conn_out: Connection = None,
+):
+    # need to reinitialize config in subprocess
+    config = InvokeAIAppConfig.get_config()
+    args = ["--root", opt.root] if opt.root else []
+    config.parse_args(args)
+
+    # set up so that stderr is sent to conn_out
+    if conn_out:
+        translator = StderrToMessage(conn_out)
+        sys.stderr = translator
+        sys.stdout = translator
+        logger = InvokeAILogger.get_logger()
+        logger.handlers.clear()
+        logger.addHandler(logging.StreamHandler(translator))
+
+    installer = ModelInstall(config, prediction_type_helper=lambda x: ask_user_for_prediction_type(x, conn_out))
+    installer.install(selections)
+
+    if conn_out:
+        conn_out.send_bytes("*done*".encode("utf-8"))
+        conn_out.close()
+
+
+# --------------------------------------------------------
+def select_and_download_models(opt: Namespace):
     precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device()))
-    # unsure how to avoid a typing complaint in the next line: config.precision is an enumerated Literal
-    config.precision = precision  # type: ignore
-    install_helper = InstallHelper(config, logger)
-    installer = install_helper.installer
+    config.precision = precision
+    installer = ModelInstall(config, prediction_type_helper=ask_user_for_prediction_type)
 
     if opt.list_models:
-        list_models(installer, opt.list_models)
+        installer.list_models(opt.list_models)
 
     elif opt.add or opt.delete:
-        selections = InstallSelections(
-            install_models=[UnifiedModelInfo(source=x) for x in (opt.add or [])], remove_models=opt.delete or []
-        )
-        install_helper.add_or_delete(selections)
+        selections = InstallSelections(install_models=opt.add or [], remove_models=opt.delete or [])
+        installer.install(selections)
 
     elif opt.default_only:
-        default_model = install_helper.default_model()
-        assert default_model is not None
-        selections = InstallSelections(install_models=[default_model])
-        install_helper.add_or_delete(selections)
+        selections = InstallSelections(install_models=installer.default_model())
+        installer.install(selections)
 
     elif opt.yes_to_all:
-        selections = InstallSelections(install_models=install_helper.recommended_models())
-        install_helper.add_or_delete(selections)
+        selections = InstallSelections(install_models=installer.recommended_models())
+        installer.install(selections)
 
     # this is where the TUI is called
     else:
+        # needed to support the probe() method running under a subprocess
+        torch.multiprocessing.set_start_method("spawn")
+
         if not set_min_terminal_size(MIN_COLS, MIN_LINES):
             raise WindowTooSmallException(
                 "Could not increase terminal size. Try running again with a larger window or smaller font size."
             )
 
-        installApp = AddModelApplication(opt, install_helper)
+        installApp = AddModelApplication(opt)
         try:
             installApp.run()
-        except KeyboardInterrupt:
-            print("Aborted...")
-            sys.exit(-1)
-
-        install_helper.add_or_delete(installApp.install_selections)
+        except KeyboardInterrupt as e:
+            if hasattr(installApp, "main_form"):
+                if installApp.main_form.subprocess and installApp.main_form.subprocess.is_alive():
+                    logger.info("Terminating subprocesses")
+                    installApp.main_form.subprocess.terminate()
+                    installApp.main_form.subprocess = None
+            raise e
+        process_and_execute(opt, installApp.install_selections)
 
 
 # -------------------------------------
-def main() -> None:
+def main():
     parser = argparse.ArgumentParser(description="InvokeAI model downloader")
     parser.add_argument(
         "--add",
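
Note: StderrToMessage above works because Python only requires a file-like object (anything with write() and flush()) wherever sys.stderr is expected, so a child process can stream its log output over the pipe instead of the terminal. A self-contained sketch of the same shim; the class and pipe here are illustrative:

    import sys
    from multiprocessing import Pipe

    class StreamToConnection:
        """File-like shim that forwards writes over a multiprocessing Connection."""
        def __init__(self, connection):
            self.connection = connection

        def write(self, data: str):
            self.connection.send_bytes(data.encode("utf-8"))

        def flush(self):
            pass  # nothing buffered locally

    parent, child = Pipe()
    sys.stderr = StreamToConnection(child)
    print("captured!", file=sys.stderr)   # lands on the parent end of the pipe
    sys.stderr = sys.__stderr__           # restore the real stream
    print(parent.recv_bytes().decode("utf-8"))
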
@@ -564,7 +754,7 @@ def main() -> None:
     parser.add_argument(
         "--delete",
         nargs="*",
-        help="List of names of models to delete. Use type:name to disambiguate, as in `controlnet:my_model`",
+        help="List of names of models to idelete",
     )
     parser.add_argument(
         "--full-precision",
@@ -591,6 +781,14 @@ def main() -> None:
         choices=[x.value for x in ModelType],
         help="list installed models",
     )
+    parser.add_argument(
+        "--config_file",
+        "-c",
+        dest="config_file",
+        type=str,
+        default=None,
+        help="path to configuration file to create",
+    )
     parser.add_argument(
         "--root_dir",
         dest="root",
@@ -267,6 +267,17 @@ class SingleSelectWithChanged(npyscreen.SelectOne):
             self.on_changed(self.value)
 
 
+class CheckboxWithChanged(npyscreen.Checkbox):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.on_changed = None
+
+    def whenToggled(self):
+        super().whenToggled()
+        if self.on_changed:
+            self.on_changed(self.value)
+
+
 class SingleSelectColumnsSimple(SelectColumnBase, SingleSelectWithChanged):
     """Row of radio buttons. Spacebar to select."""
 
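
Note: CheckboxWithChanged adds an optional on_changed callback by overriding the toggle handler, the same hook pattern SingleSelectWithChanged uses. A minimal sketch with a stand-in base class (the real code subclasses npyscreen.Checkbox, whose editing loop drives the toggle):

    class Checkbox:  # stand-in for npyscreen.Checkbox
        def __init__(self):
            self.value = False

        def whenToggled(self):
            self.value = not self.value

    class CheckboxWithChanged(Checkbox):
        def __init__(self):
            super().__init__()
            self.on_changed = None  # client assigns a callable here

        def whenToggled(self):
            super().whenToggled()
            if self.on_changed:
                self.on_changed(self.value)

    box = CheckboxWithChanged()
    box.on_changed = lambda v: print(f"toggled to {v}")
    box.whenToggled()  # prints: toggled to True
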
@@ -136,8 +136,7 @@ dependencies = [
 
 # full commands
 "invokeai-configure" = "invokeai.frontend.install.invokeai_configure:invokeai_configure"
-"invokeai-merge" = "invokeai.frontend.merge:invokeai_merge_diffusers"
-"invokeai-merge2" = "invokeai.frontend.merge.merge_diffusers2:main"
+"invokeai-merge" = "invokeai.frontend.merge.merge_diffusers:main"
 "invokeai-ti" = "invokeai.frontend.training:invokeai_textual_inversion"
 "invokeai-model-install" = "invokeai.frontend.install.model_install:main"
 "invokeai-model-install2" = "invokeai.frontend.install.model_install2:main" # will eventually be renamed to invokeai-model-install
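
Note: each "name" = "package.module:function" line in this table declares a console-script entry point; on install, pip generates a `name` executable that imports the module and calls the function. Running `invokeai-model-install` therefore amounts, roughly, to:

    # Rough Python equivalent of the generated invokeai-model-install script:
    from invokeai.frontend.install.model_install import main

    if __name__ == "__main__":
        main()
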
@@ -1,47 +0,0 @@
-from pathlib import Path
-
-import pytest
-
-from invokeai.app.services.config.config_default import InvokeAIAppConfig
-from invokeai.backend import BaseModelType, ModelManager, ModelType, SubModelType
-
-BASIC_MODEL_NAME = ("SDXL base", BaseModelType.StableDiffusionXL, ModelType.Main)
-VAE_OVERRIDE_MODEL_NAME = ("SDXL with VAE", BaseModelType.StableDiffusionXL, ModelType.Main)
-VAE_NULL_OVERRIDE_MODEL_NAME = ("SDXL with empty VAE", BaseModelType.StableDiffusionXL, ModelType.Main)
-
-
-@pytest.fixture
-def model_manager(datadir) -> ModelManager:
-    InvokeAIAppConfig.get_config(root=datadir)
-    return ModelManager(datadir / "configs" / "relative_sub.models.yaml")
-
-
-def test_get_model_names(model_manager: ModelManager):
-    names = model_manager.model_names()
-    assert names[:2] == [BASIC_MODEL_NAME, VAE_OVERRIDE_MODEL_NAME]
-
-
-def test_get_model_path_for_diffusers(model_manager: ModelManager, datadir: Path):
-    model_config = model_manager._get_model_config(BASIC_MODEL_NAME[1], BASIC_MODEL_NAME[0], BASIC_MODEL_NAME[2])
-    top_model_path, is_override = model_manager._get_model_path(model_config)
-    expected_model_path = datadir / "models" / "sdxl" / "main" / "SDXL base 1_0"
-    assert top_model_path == expected_model_path
-    assert not is_override
-
-
-def test_get_model_path_for_overridden_vae(model_manager: ModelManager, datadir: Path):
-    model_config = model_manager._get_model_config(
-        VAE_OVERRIDE_MODEL_NAME[1], VAE_OVERRIDE_MODEL_NAME[0], VAE_OVERRIDE_MODEL_NAME[2]
-    )
-    vae_model_path, is_override = model_manager._get_model_path(model_config, SubModelType.Vae)
-    expected_vae_path = datadir / "models" / "sdxl" / "vae" / "sdxl-vae-fp16-fix"
-    assert vae_model_path == expected_vae_path
-    assert is_override
-
-
-def test_get_model_path_for_null_overridden_vae(model_manager: ModelManager, datadir: Path):
-    model_config = model_manager._get_model_config(
-        VAE_NULL_OVERRIDE_MODEL_NAME[1], VAE_NULL_OVERRIDE_MODEL_NAME[0], VAE_NULL_OVERRIDE_MODEL_NAME[2]
-    )
-    vae_model_path, is_override = model_manager._get_model_path(model_config, SubModelType.Vae)
-    assert not is_override