configure script now working

Lincoln Stein
2023-09-22 22:15:42 -04:00
parent 07ddd601e1
commit d2cdbe5c4e
15 changed files with 347 additions and 765 deletions

View File

@ -234,6 +234,18 @@ Paths:
# controlnet_dir: null
```
### Model Cache
These options control the size of the various caches that InvokeAI uses
during the model loading and conversion process. All units are in GB.
| Setting | Default Value | Description |
|----------|----------------|--------------|
| `disk` | `20.0` | Before loading a model into memory, InvokeAI converts .ckpt and .safetensors models into diffusers format and saves them to disk. This option controls the maximum size of the directory in which these converted models are stored. If set to zero, then only the most recently-used model will be cached. |
| `ram` | `6.0` | After loading a model from disk, it is kept in system RAM until it is needed again. This option controls how much RAM is set aside for this purpose. Larger amounts allow more models to reside in RAM and for InvokeAI to quickly switch between them. |
| `vram` | `0.25` | The amount of VRAM (in GB) reserved for model storage. This allows smaller models to remain resident in VRAM, speeding up execution modestly. It should be kept small. |
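A minimal sketch of reading or overriding these values programmatically through `InvokeAIAppConfig`, assuming the usual InvokeAI behavior of exposing each settings field as a command-line switch (the override numbers are arbitrary examples):

```python
from invokeai.app.services.config import InvokeAIAppConfig

# read the active configuration (invokeai.yaml plus any overrides)
config = InvokeAIAppConfig.get_config()
print(config.disk, config.ram, config.vram)  # e.g. 20.0 6.0 0.25

# the caches can also be resized at launch time
config.parse_args(["--disk", "40", "--ram", "12"])
```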
### Logging
These settings control the information, warning, and debugging

View File

@ -182,8 +182,9 @@ from .base import InvokeAISettings
INIT_FILE = Path("invokeai.yaml")
DB_FILE = Path("invokeai.db")
LEGACY_INIT_FILE = Path("invokeai.init")
DEFAULT_MAX_VRAM = 0.5
DEFAULT_MAX_DISK_CACHE = 15 # gigs, enough for two sdxl models, or 5 sd-1 models
DEFAULT_MAX_DISK_CACHE = 20 # gigs, enough for three sdxl models, or 6 sd-1 models
DEFAULT_RAM_CACHE = 6.0
DEFAULT_VRAM_CACHE = 0.25
class InvokeAIAppConfig(InvokeAISettings):
@ -241,8 +242,8 @@ class InvokeAIAppConfig(InvokeAISettings):
version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other")
# CACHE
ram : Union[float, Literal["auto"]] = Field(default=6.0, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number or 'auto')", category="Model Cache", )
vram : Union[float, Literal["auto"]] = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number or 'auto')", category="Model Cache", )
ram : Union[float, Literal["auto"]] = Field(default=DEFAULT_RAM_CACHE, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number or 'auto')", category="Model Cache", )
vram : Union[float, Literal["auto"]] = Field(default=DEFAULT_VRAM_CACHE, ge=0, description="Amount of VRAM reserved for model storage (floating point number or 'auto')", category="Model Cache", )
disk : float = Field(default=DEFAULT_MAX_DISK_CACHE, ge=0, description="Maximum size (in GB) for the disk-based diffusers model conversion cache", category="Model Cache", )
lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", category="Model Cache", )

View File

@ -0,0 +1,179 @@
"""
Utility (backend) functions used by model_install.py
"""
from pathlib import Path
from typing import Dict, List, Optional
import omegaconf
from huggingface_hub import HfFolder
from pydantic import BaseModel, Field
from pydantic.dataclasses import dataclass
from tqdm import tqdm
import invokeai.configs as configs
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.model_manager import BaseModelType, ModelInstall, ModelInstallJob, ModelType
from invokeai.backend.model_manager.install import ModelSourceMetadata
# name of the starter models file
INITIAL_MODELS = "INITIAL_MODELS.yaml"
ACCESS_TOKEN = HfFolder.get_token()
class UnifiedModelInfo(BaseModel):
name: Optional[str] = None
base_model: Optional[BaseModelType] = None
model_type: Optional[ModelType] = None
source: Optional[str] = None
description: Optional[str] = None
recommended: bool = False
installed: bool = False
default: bool = False
@dataclass
class InstallSelections:
install_models: List[UnifiedModelInfo] = Field(default_factory=list)
remove_models: List[str] = Field(default_factory=list)
class TqdmProgress(object):
_bars: Dict[int, tqdm] # the tqdm object
_last: Dict[int, int] # last bytes downloaded
def __init__(self):
self._bars = dict()
self._last = dict()
def job_update(self, job: ModelInstallJob):
job_id = job.id
if job.status == "running":
if job_id not in self._bars:
dest = Path(job.destination).name
self._bars[job_id] = tqdm(
desc=dest,
initial=0,
total=job.total_bytes,
unit="iB",
unit_scale=True,
)
self._last[job_id] = 0
self._bars[job_id].update(job.bytes - self._last[job_id])
self._last[job_id] = job.bytes
class InstallHelper(object):
"""Capture information stored jointly in INITIAL_MODELS.yaml and the installed models db."""
all_models: Dict[str, UnifiedModelInfo] = dict()
_installer: ModelInstall
_config: InvokeAIAppConfig
_installed_models: List[str] = []
_starter_models: List[str] = []
_default_model: Optional[str] = None
_initial_models: omegaconf.DictConfig
def __init__(self, config: InvokeAIAppConfig):
self._config = config
self._installer = ModelInstall(config=config, event_handlers=[TqdmProgress().job_update])
self._initial_models = omegaconf.OmegaConf.load(Path(configs.__path__[0]) / INITIAL_MODELS)
self._initialize_model_lists()
@property
def installer(self) -> ModelInstall:
return self._installer
def _initialize_model_lists(self):
"""
Initialize our model slots.
Set up the following:
installed_models -- list of installed model keys
starter_models -- list of starter model keys from INITIAL_MODELS
all_models -- dict of key => UnifiedModelInfo
default_model -- key to default model
"""
# previously-installed models
for model in self._installer.store.all_models():
info = UnifiedModelInfo.parse_obj(model.dict())
info.installed = True
key = f"{model.base_model.value}/{model.model_type.value}/{model.name}"
self.all_models[key] = info
self._installed_models.append(key)
for key in self._initial_models.keys():
if key in self.all_models:
# we want to preserve the description
description = self.all_models[key].description or self._initial_models[key].get("description")
self.all_models[key].description = description
else:
base_model, model_type, model_name = key.split("/")
info = UnifiedModelInfo(
name=model_name,
model_type=model_type,
base_model=base_model,
source=self._initial_models[key].source,
description=self._initial_models[key].get("description"),
recommended=self._initial_models[key].get("recommended", False),
default=self._initial_models[key].get("default", False),
)
self.all_models[key] = info
if not self._default_model:
self._default_model = key
elif self._initial_models[key].get("default", False):
self._default_model = key
self._starter_models.append(key)
def recommended_models(self) -> List[UnifiedModelInfo]:
return [self._to_model(x) for x in self._starter_models if self._to_model(x).recommended]
def installed_models(self) -> List[UnifiedModelInfo]:
return [self._to_model(x) for x in self._installed_models]
def starter_models(self) -> List[UnifiedModelInfo]:
return [self._to_model(x) for x in self._starter_models]
def default_model(self) -> UnifiedModelInfo:
return self._to_model(self._default_model)
def _to_model(self, key: str) -> UnifiedModelInfo:
return self.all_models[key]
def add_or_delete(self, selections: InstallSelections):
installer = self._installer
for model in selections.install_models:
metadata = ModelSourceMetadata(description=model.description, name=model.name)
installer.install(
model.source,
variant="fp16" if self._config.precision == "float16" else None,
access_token=ACCESS_TOKEN,  # this is a module-level global
metadata=metadata,
)
for model in selections.remove_models:
parts = model.split("/")
if len(parts) == 1:
base_model, model_type, model_name = (None, None, model)
else:
base_model, model_type, model_name = parts
matches = installer.store.search_by_name(
base_model=base_model, model_type=model_type, model_name=model_name
)
if len(matches) > 1:
print(f"{model} is ambiguous. Please use model_type:model_name (e.g. main:my_model) to disambiguate.")
elif not matches:
print(f"{model}: unknown model")
else:
for m in matches:
print(f"Deleting {m.model_type}:{m.name}")
installer.conditionally_delete(m.key)
installer.wait_for_installs()
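Taken together, a minimal sketch of driving `InstallHelper` from a script (a hypothetical caller; the extra repo id is an arbitrary example):

```python
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.install.install_helper import InstallHelper, InstallSelections, UnifiedModelInfo

config = InvokeAIAppConfig.get_config()
helper = InstallHelper(config)

# queue everything flagged "recommended" in INITIAL_MODELS.yaml,
# plus one extra model given by Hugging Face repo id (arbitrary example)
selections = InstallSelections(
    install_models=helper.recommended_models() + [UnifiedModelInfo(source="stabilityai/sd-vae-ft-mse")],
)
helper.add_or_delete(selections)  # blocks on wait_for_installs() before returning
```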

View File

@ -22,7 +22,6 @@ from typing import Any, get_args, get_type_hints
from urllib import request
import npyscreen
import omegaconf
import psutil
import torch
import transformers
@ -39,10 +38,10 @@ from transformers import AutoFeatureExtractor, BertTokenizerFast, CLIPTextConfig
import invokeai.configs as configs
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.install.legacy_arg_parsing import legacy_parser
from invokeai.backend.install.model_install_backend import InstallSelections, ModelInstall, hf_download_from_pretrained
from invokeai.backend.model_management.model_probe import BaseModelType, ModelType
from invokeai.backend.model_manager import BaseModelType, ModelType
from invokeai.backend.util import choose_precision, choose_torch_device
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.frontend.install.model_install import addModelsForm, process_and_execute
from invokeai.frontend.install.model_install import addModelsForm
# TO DO - Move all the frontend code into invokeai.frontend.install
from invokeai.frontend.install.widgets import (
@ -57,6 +56,8 @@ from invokeai.frontend.install.widgets import (
set_min_terminal_size,
)
from .install_helper import InstallHelper, InstallSelections
warnings.filterwarnings("ignore")
transformers.logging.set_verbosity_error()
@ -83,7 +84,6 @@ GB = 1073741824 # GB in bytes
HAS_CUDA = torch.cuda.is_available()
_, MAX_VRAM = torch.cuda.mem_get_info() if HAS_CUDA else (0, 0)
MAX_VRAM /= GB
MAX_RAM = psutil.virtual_memory().total / GB
@ -180,6 +180,20 @@ class ProgressBar:
self.pbar.update(block_size)
# ---------------------------------------------
def hf_download_from_pretrained(model_class: object, model_name: str, destination: Path, **kwargs):
logger = InvokeAILogger.getLogger("InvokeAIConfigure")
logger.addFilter(lambda x: "fp16 is not a valid" not in x.getMessage())
model = model_class.from_pretrained(
model_name,
resume_download=True,
**kwargs,
)
model.save_pretrained(destination, safe_serialization=True)
return destination
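For illustration, a hypothetical call mirroring how the configure script fetches its support models; `BertTokenizerFast` comes from the imports above, `config` is the script's global `InvokeAIAppConfig`, and the destination path is an assumed example:

```python
from transformers import BertTokenizerFast

# snapshot the tokenizer into the models directory (assumed layout under models/core/convert)
dest = hf_download_from_pretrained(
    BertTokenizerFast,
    "bert-base-uncased",
    config.models_path / "core" / "convert" / "bert-base-uncased",
)
```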
# ---------------------------------------------
def download_with_progress_bar(model_url: str, model_dest: str, label: str = "the"):
try:
@ -456,6 +470,25 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
max_width=110,
scroll_exit=True,
)
self.add_widget_intelligent(
npyscreen.TitleFixedText,
name="Model disk conversion cache size (GB). This is used to cache safetensors files that need to be converted to diffusers..",
begin_entry_at=0,
editable=False,
color="CONTROL",
scroll_exit=True,
)
self.nextrely -= 1
self.disk = self.add_widget_intelligent(
npyscreen.Slider,
value=clip(old_opts.disk, range=(0, 100), step=0.5),
out_of=100,
lowest=0.0,
step=0.5,
relx=8,
scroll_exit=True,
)
self.nextrely += 1
self.add_widget_intelligent(
npyscreen.TitleFixedText,
name="Model RAM cache size (GB). Make this at least large enough to hold a single full model.",
@ -591,6 +624,7 @@ https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENS
for attr in [
"ram",
"vram",
"disk",
"outdir",
]:
if hasattr(self, attr):
@ -616,13 +650,14 @@ https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENS
class EditOptApplication(npyscreen.NPSAppManaged):
def __init__(self, program_opts: Namespace, invokeai_opts: Namespace):
def __init__(self, program_opts: Namespace, invokeai_opts: Namespace, install_helper: InstallHelper):
super().__init__()
self.program_opts = program_opts
self.invokeai_opts = invokeai_opts
self.user_cancelled = False
self.autoload_pending = True
self.install_selections = default_user_selections(program_opts)
self.install_helper = install_helper
self.install_selections = default_user_selections(program_opts, install_helper)
def onStart(self):
npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
@ -645,32 +680,17 @@ class EditOptApplication(npyscreen.NPSAppManaged):
return self.options.marshall_arguments()
def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Namespace:
editApp = EditOptApplication(program_opts, invokeai_opts)
editApp.run()
return editApp.new_opts()
def default_startup_options(init_file: Path) -> Namespace:
opts = InvokeAIAppConfig.get_config()
return opts
def default_user_selections(program_opts: Namespace) -> InstallSelections:
try:
installer = ModelInstall(config)
except omegaconf.errors.ConfigKeyError:
logger.warning("Your models.yaml file is corrupt or out of date. Reinitializing")
initialize_rootdir(config.root_path, True)
installer = ModelInstall(config)
models = installer.all_models()
def default_user_selections(program_opts: Namespace, install_helper: InstallHelper) -> InstallSelections:
default_models = (
[install_helper.default_model()] if program_opts.default_only else install_helper.recommended_models()
)
return InstallSelections(
install_models=[models[installer.default_model()].path or models[installer.default_model()].repo_id]
if program_opts.default_only
else [models[x].path or models[x].repo_id for x in installer.recommended_models()]
if program_opts.yes_to_all
else list(),
install_models=default_models if program_opts.yes_to_all else list(),
)
@ -720,7 +740,7 @@ def maybe_create_models_yaml(root: Path):
# -------------------------------------
def run_console_ui(program_opts: Namespace, initfile: Path = None) -> (Namespace, Namespace):
def run_console_ui(program_opts: Namespace, initfile: Path, install_helper: InstallHelper) -> (Namespace, Namespace):
invokeai_opts = default_startup_options(initfile)
invokeai_opts.root = program_opts.root
@ -729,13 +749,7 @@ def run_console_ui(program_opts: Namespace, initfile: Path = None) -> (Namespace
"Could not increase terminal size. Try running again with a larger window or smaller font size."
)
# the install-models application spawns a subprocess to install
# models, and will crash unless this is set before running.
import torch
torch.multiprocessing.set_start_method("spawn")
editApp = EditOptApplication(program_opts, invokeai_opts)
editApp = EditOptApplication(program_opts, invokeai_opts, install_helper)
editApp.run()
if editApp.user_cancelled:
return (None, None)
@ -894,6 +908,7 @@ def main():
if opt.full_precision:
invoke_args.extend(["--precision", "float32"])
config.parse_args(invoke_args)
config.precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device()))
logger = InvokeAILogger().getLogger(config=config)
errors = set()
@ -907,14 +922,18 @@ def main():
# run this unconditionally in case new directories need to be added
initialize_rootdir(config.root_path, opt.yes_to_all)
models_to_download = default_user_selections(opt)
# this will initialize the models.yaml file if not present
install_helper = InstallHelper(config)
models_to_download = default_user_selections(opt, install_helper)
new_init_file = config.root_path / "invokeai.yaml"
if opt.yes_to_all:
write_default_options(opt, new_init_file)
init_options = Namespace(precision="float32" if opt.full_precision else "float16")
else:
init_options, models_to_download = run_console_ui(opt, new_init_file)
init_options, models_to_download = run_console_ui(opt, new_init_file, install_helper)
if init_options:
write_opts(init_options, new_init_file)
else:
@ -929,10 +948,12 @@ def main():
if opt.skip_sd_weights:
logger.warning("Skipping diffusion weights download per user request")
elif models_to_download:
process_and_execute(opt, models_to_download)
install_helper.add_or_delete(models_to_download)
postscript(errors=errors)
if not opt.yes_to_all:
input("Press any key to continue...")
except WindowTooSmallException as e:

View File

@ -1,551 +0,0 @@
"""
Utility (backend) functions used by model_install.py
"""
import os
import shutil
import warnings
from dataclasses import dataclass, field
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Callable, Dict, List, Optional, Set, Union
import requests
import torch
from diffusers import DiffusionPipeline
from diffusers import logging as dlogging
from huggingface_hub import HfApi, HfFolder, hf_hub_url
from omegaconf import OmegaConf
from tqdm import tqdm
import invokeai.configs as configs
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.model_management import AddModelResult, BaseModelType, ModelManager, ModelType, ModelVariantType
from invokeai.backend.model_management.model_probe import ModelProbe, ModelProbeInfo, SchedulerPredictionType
from invokeai.backend.util import download_with_resume
from invokeai.backend.util.devices import choose_torch_device, torch_dtype
from ..util.logging import InvokeAILogger
warnings.filterwarnings("ignore")
# --------------------------globals-----------------------
config = InvokeAIAppConfig.get_config()
logger = InvokeAILogger.getLogger(name="InvokeAI")
# the initial "configs" dir is now bundled in the `invokeai.configs` package
Dataset_path = Path(configs.__path__[0]) / "INITIAL_MODELS.yaml"
Config_preamble = """
# This file describes the alternative machine learning models
# available to InvokeAI script.
#
# To add a new model, follow the examples below. Each
# model requires a model config file, a weights file,
# and the width and height of the images it
# was trained on.
"""
LEGACY_CONFIGS = {
BaseModelType.StableDiffusion1: {
ModelVariantType.Normal: "v1-inference.yaml",
ModelVariantType.Inpaint: "v1-inpainting-inference.yaml",
},
BaseModelType.StableDiffusion2: {
ModelVariantType.Normal: {
SchedulerPredictionType.Epsilon: "v2-inference.yaml",
SchedulerPredictionType.VPrediction: "v2-inference-v.yaml",
},
ModelVariantType.Inpaint: {
SchedulerPredictionType.Epsilon: "v2-inpainting-inference.yaml",
SchedulerPredictionType.VPrediction: "v2-inpainting-inference-v.yaml",
},
},
BaseModelType.StableDiffusionXL: {
ModelVariantType.Normal: "sd_xl_base.yaml",
},
BaseModelType.StableDiffusionXLRefiner: {
ModelVariantType.Normal: "sd_xl_refiner.yaml",
},
}
@dataclass
class ModelInstallList:
"""Class for listing models to be installed/removed"""
install_models: List[str] = field(default_factory=list)
remove_models: List[str] = field(default_factory=list)
@dataclass
class InstallSelections:
install_models: List[str] = field(default_factory=list)
remove_models: List[str] = field(default_factory=list)
@dataclass
class ModelLoadInfo:
name: str
model_type: ModelType
base_type: BaseModelType
path: Optional[Path] = None
repo_id: Optional[str] = None
description: str = ""
installed: bool = False
recommended: bool = False
default: bool = False
class ModelInstall(object):
def __init__(
self,
config: InvokeAIAppConfig,
prediction_type_helper: Optional[Callable[[Path], SchedulerPredictionType]] = None,
model_manager: Optional[ModelManager] = None,
access_token: Optional[str] = None,
):
self.config = config
self.mgr = model_manager or ModelManager(config.model_conf_path)
self.datasets = OmegaConf.load(Dataset_path)
self.prediction_helper = prediction_type_helper
self.access_token = access_token or HfFolder.get_token()
self.reverse_paths = self._reverse_paths(self.datasets)
def all_models(self) -> Dict[str, ModelLoadInfo]:
"""
Return dict of model_key=>ModelLoadInfo objects.
This method consolidates and simplifies the entries in both
models.yaml and INITIAL_MODELS.yaml so that they can
be treated uniformly. It also sorts the models alphabetically
by their name, to improve the display somewhat.
"""
model_dict = dict()
# first populate with the entries in INITIAL_MODELS.yaml
for key, value in self.datasets.items():
name, base, model_type = ModelManager.parse_key(key)
value["name"] = name
value["base_type"] = base
value["model_type"] = model_type
model_dict[key] = ModelLoadInfo(**value)
# supplement with entries in models.yaml
installed_models = [x for x in self.mgr.list_models()]
# suppresses autoloaded models
# installed_models = [x for x in self.mgr.list_models() if not self._is_autoloaded(x)]
for md in installed_models:
base = md["base_model"]
model_type = md["model_type"]
name = md["model_name"]
key = ModelManager.create_key(name, base, model_type)
if key in model_dict:
model_dict[key].installed = True
else:
model_dict[key] = ModelLoadInfo(
name=name,
base_type=base,
model_type=model_type,
path=value.get("path"),
installed=True,
)
return {x: model_dict[x] for x in sorted(model_dict.keys(), key=lambda y: model_dict[y].name.lower())}
def _is_autoloaded(self, model_info: dict) -> bool:
path = model_info.get("path")
if not path:
return False
for autodir in ["autoimport_dir", "lora_dir", "embedding_dir", "controlnet_dir"]:
if autodir_path := getattr(self.config, autodir):
autodir_path = self.config.root_path / autodir_path
if Path(path).is_relative_to(autodir_path):
return True
return False
def list_models(self, model_type):
installed = self.mgr.list_models(model_type=model_type)
print(f"Installed models of type `{model_type}`:")
for i in installed:
print(f"{i['model_name']}\t{i['base_model']}\t{i['path']}")
# logic here a little reversed to maintain backward compatibility
def starter_models(self, all_models: bool = False) -> Set[str]:
models = set()
for key, value in self.datasets.items():
name, base, model_type = ModelManager.parse_key(key)
if all_models or model_type in [ModelType.Main, ModelType.Vae]:
models.add(key)
return models
def recommended_models(self) -> Set[str]:
starters = self.starter_models(all_models=True)
return set([x for x in starters if self.datasets[x].get("recommended", False)])
def default_model(self) -> str:
starters = self.starter_models()
defaults = [x for x in starters if self.datasets[x].get("default", False)]
return defaults[0]
def install(self, selections: InstallSelections):
verbosity = dlogging.get_verbosity() # quench NSFW nags
dlogging.set_verbosity_error()
job = 1
jobs = len(selections.remove_models) + len(selections.install_models)
# remove requested models
for key in selections.remove_models:
name, base, mtype = self.mgr.parse_key(key)
logger.info(f"Deleting {mtype} model {name} [{job}/{jobs}]")
try:
self.mgr.del_model(name, base, mtype)
except FileNotFoundError as e:
logger.warning(e)
job += 1
# add requested models
for path in selections.install_models:
logger.info(f"Installing {path} [{job}/{jobs}]")
try:
self.heuristic_import(path)
except (ValueError, KeyError) as e:
logger.error(str(e))
job += 1
dlogging.set_verbosity(verbosity)
self.mgr.commit()
def heuristic_import(
self,
model_path_id_or_url: Union[str, Path],
models_installed: Set[Path] = None,
) -> Dict[str, AddModelResult]:
"""
:param model_path_id_or_url: A Path to a local model to import, or a string representing its repo_id or URL
:param models_installed: Set of installed models, used for recursive invocation
Returns a set of dict objects corresponding to newly-created stanzas in models.yaml.
"""
if not models_installed:
models_installed = dict()
# A little hack to allow nested routines to retrieve info on the requested ID
self.current_id = model_path_id_or_url
path = Path(model_path_id_or_url)
# checkpoint file, or similar
if path.is_file():
models_installed.update({str(path): self._install_path(path)})
# folders style or similar
elif path.is_dir() and any(
[
(path / x).exists()
for x in {"config.json", "model_index.json", "learned_embeds.bin", "pytorch_lora_weights.bin"}
]
):
models_installed.update({str(model_path_id_or_url): self._install_path(path)})
# recursive scan
elif path.is_dir():
for child in path.iterdir():
self.heuristic_import(child, models_installed=models_installed)
# huggingface repo
elif len(str(model_path_id_or_url).split("/")) == 2:
models_installed.update({str(model_path_id_or_url): self._install_repo(str(model_path_id_or_url))})
# a URL
elif str(model_path_id_or_url).startswith(("http:", "https:", "ftp:")):
models_installed.update({str(model_path_id_or_url): self._install_url(model_path_id_or_url)})
else:
raise KeyError(f"{str(model_path_id_or_url)} is not recognized as a local path, repo ID or URL. Skipping")
return models_installed
# install a model from a local path. The optional info parameter is there to prevent
# the model from being probed twice in the event that it has already been probed.
def _install_path(self, path: Path, info: ModelProbeInfo = None) -> AddModelResult:
info = info or ModelProbe().heuristic_probe(path, self.prediction_helper)
if not info:
logger.warning(f"Unable to parse format of {path}")
return None
model_name = path.stem if path.is_file() else path.name
if self.mgr.model_exists(model_name, info.base_type, info.model_type):
raise ValueError(f'A model named "{model_name}" is already installed.')
attributes = self._make_attributes(path, info)
return self.mgr.add_model(
model_name=model_name,
base_model=info.base_type,
model_type=info.model_type,
model_attributes=attributes,
)
def _install_url(self, url: str) -> AddModelResult:
with TemporaryDirectory(dir=self.config.models_path) as staging:
location = download_with_resume(url, Path(staging))
if not location:
logger.error(f"Unable to download {url}. Skipping.")
info = ModelProbe().heuristic_probe(location)
dest = self.config.models_path / info.base_type.value / info.model_type.value / location.name
dest.parent.mkdir(parents=True, exist_ok=True)
models_path = shutil.move(location, dest)
# staged version will be garbage-collected at this time
return self._install_path(Path(models_path), info)
def _install_repo(self, repo_id: str) -> AddModelResult:
hinfo = HfApi().model_info(repo_id)
# we try to figure out how to download this most economically
# list all the files in the repo
files = [x.rfilename for x in hinfo.siblings]
location = None
with TemporaryDirectory(dir=self.config.models_path) as staging:
staging = Path(staging)
if "model_index.json" in files:
location = self._download_hf_pipeline(repo_id, staging) # pipeline
elif "unet/model.onnx" in files:
location = self._download_hf_model(repo_id, files, staging)
else:
for suffix in ["safetensors", "bin"]:
if f"pytorch_lora_weights.{suffix}" in files:
location = self._download_hf_model(repo_id, ["pytorch_lora_weights.bin"], staging) # LoRA
break
elif (
self.config.precision == "float16" and f"diffusion_pytorch_model.fp16.{suffix}" in files
): # vae, controlnet or some other standalone
files = ["config.json", f"diffusion_pytorch_model.fp16.{suffix}"]
location = self._download_hf_model(repo_id, files, staging)
break
elif f"diffusion_pytorch_model.{suffix}" in files:
files = ["config.json", f"diffusion_pytorch_model.{suffix}"]
location = self._download_hf_model(repo_id, files, staging)
break
elif f"learned_embeds.{suffix}" in files:
location = self._download_hf_model(repo_id, [f"learned_embeds.{suffix}"], staging)
break
if not location:
logger.warning(f"Could not determine type of repo {repo_id}. Skipping install.")
return {}
info = ModelProbe().heuristic_probe(location, self.prediction_helper)
if not info:
logger.warning(f"Could not probe {location}. Skipping install.")
return {}
dest = (
self.config.models_path
/ info.base_type.value
/ info.model_type.value
/ self._get_model_name(repo_id, location)
)
if dest.exists():
shutil.rmtree(dest)
shutil.copytree(location, dest)
return self._install_path(dest, info)
def _get_model_name(self, path_name: str, location: Path) -> str:
"""
Calculate a name for the model - primitive implementation.
"""
if key := self.reverse_paths.get(path_name):
(name, base, mtype) = ModelManager.parse_key(key)
return name
elif location.is_dir():
return location.name
else:
return location.stem
def _make_attributes(self, path: Path, info: ModelProbeInfo) -> dict:
model_name = path.name if path.is_dir() else path.stem
description = f"{info.base_type.value} {info.model_type.value} model {model_name}"
if key := self.reverse_paths.get(self.current_id):
if key in self.datasets:
description = self.datasets[key].get("description") or description
rel_path = self.relative_to_root(path, self.config.models_path)
attributes = dict(
path=str(rel_path),
description=str(description),
model_format=info.format,
)
legacy_conf = None
if info.model_type == ModelType.Main or info.model_type == ModelType.ONNX:
attributes.update(
dict(
variant=info.variant_type,
)
)
if info.format == "checkpoint":
try:
possible_conf = path.with_suffix(".yaml")
if possible_conf.exists():
legacy_conf = str(self.relative_to_root(possible_conf))
elif info.base_type == BaseModelType.StableDiffusion2:
legacy_conf = Path(
self.config.legacy_conf_dir,
LEGACY_CONFIGS[info.base_type][info.variant_type][info.prediction_type],
)
else:
legacy_conf = Path(
self.config.legacy_conf_dir, LEGACY_CONFIGS[info.base_type][info.variant_type]
)
except KeyError:
legacy_conf = Path(self.config.legacy_conf_dir, "v1-inference.yaml") # best guess
if info.model_type == ModelType.ControlNet and info.format == "checkpoint":
possible_conf = path.with_suffix(".yaml")
if possible_conf.exists():
legacy_conf = str(self.relative_to_root(possible_conf))
if legacy_conf:
attributes.update(dict(config=str(legacy_conf)))
return attributes
def relative_to_root(self, path: Path, root: Optional[Path] = None) -> Path:
root = root or self.config.root_path
if path.is_relative_to(root):
return path.relative_to(root)
else:
return path
def _download_hf_pipeline(self, repo_id: str, staging: Path) -> Path:
"""
This retrieves a StableDiffusion model from cache or remote and then
does a save_pretrained() to the indicated staging area.
"""
_, name = repo_id.split("/")
precision = torch_dtype(choose_torch_device())
variants = ["fp16", None] if precision == torch.float16 else [None, "fp16"]
model = None
for variant in variants:
try:
model = DiffusionPipeline.from_pretrained(
repo_id,
variant=variant,
torch_dtype=precision,
safety_checker=None,
)
except Exception as e: # most errors are due to fp16 not being present. Fix this to catch other errors
if "fp16" not in str(e):
print(e)
if model:
break
if not model:
logger.error(f"Diffusers model {repo_id} could not be downloaded. Skipping.")
return None
model.save_pretrained(staging / name, safe_serialization=True)
return staging / name
def _download_hf_model(self, repo_id: str, files: List[str], staging: Path) -> Path:
_, name = repo_id.split("/")
location = staging / name
paths = list()
for filename in files:
filePath = Path(filename)
p = hf_download_with_resume(
repo_id,
model_dir=location / filePath.parent,
model_name=filePath.name,
access_token=self.access_token,
subfolder=filePath.parent,
)
if p:
paths.append(p)
else:
logger.warning(f"Could not download {filename} from {repo_id}.")
return location if len(paths) > 0 else None
@classmethod
def _reverse_paths(cls, datasets) -> dict:
"""
Reverse mapping from repo_id/path to destination name.
"""
return {v.get("path") or v.get("repo_id"): k for k, v in datasets.items()}
# -------------------------------------
def yes_or_no(prompt: str, default_yes=True):
default = "y" if default_yes else "n"
response = input(f"{prompt} [{default}] ") or default
if default_yes:
return response[0] not in ("n", "N")
else:
return response[0] in ("y", "Y")
# ---------------------------------------------
def hf_download_from_pretrained(model_class: object, model_name: str, destination: Path, **kwargs):
logger = InvokeAILogger.getLogger("InvokeAI")
logger.addFilter(lambda x: "fp16 is not a valid" not in x.getMessage())
model = model_class.from_pretrained(
model_name,
resume_download=True,
**kwargs,
)
model.save_pretrained(destination, safe_serialization=True)
return destination
# ---------------------------------------------
def hf_download_with_resume(
repo_id: str,
model_dir: str,
model_name: str,
model_dest: Path = None,
access_token: str = None,
subfolder: str = None,
) -> Path:
model_dest = model_dest or Path(os.path.join(model_dir, model_name))
os.makedirs(model_dir, exist_ok=True)
url = hf_hub_url(repo_id, model_name, subfolder=subfolder)
header = {"Authorization": f"Bearer {access_token}"} if access_token else {}
open_mode = "wb"
exist_size = 0
if os.path.exists(model_dest):
exist_size = os.path.getsize(model_dest)
header["Range"] = f"bytes={exist_size}-"
open_mode = "ab"
resp = requests.get(url, headers=header, stream=True)
total = int(resp.headers.get("content-length", 0))
if resp.status_code == 416: # "range not satisfiable", which means nothing to return
logger.info(f"{model_name}: complete file found. Skipping.")
return model_dest
elif resp.status_code == 404:
logger.warning("File not found")
return None
elif resp.status_code != 200:
logger.warning(f"{model_name}: {resp.reason}")
elif exist_size > 0:
logger.info(f"{model_name}: partial file found. Resuming...")
else:
logger.info(f"{model_name}: Downloading...")
try:
with open(model_dest, open_mode) as file, tqdm(
desc=model_name,
initial=exist_size,
total=total + exist_size,
unit="iB",
unit_scale=True,
unit_divisor=1000,
) as bar:
for data in resp.iter_content(chunk_size=1024):
size = file.write(data)
bar.update(size)
except Exception as e:
logger.error(f"An error occurred while downloading {model_name}: {str(e)}")
return None
return model_dest

View File

@ -17,7 +17,6 @@ context. Use like this:
"""
import gc
import hashlib
import os
import sys
from contextlib import suppress
@ -29,6 +28,7 @@ import torch
import invokeai.backend.util.logging as logger
from ..util import GIG
from ..util.devices import choose_torch_device
from .models import BaseModelType, ModelBase, ModelType, SubModelType
@ -42,9 +42,6 @@ DEFAULT_MAX_CACHE_SIZE = 6.0
# amount of GPU memory to hold in reserve for use by generations (GB)
DEFAULT_MAX_VRAM_CACHE_SIZE = 2.75
# actual size of a gig
GIG = 1073741824
@dataclass
class CacheStats(object):

View File

@ -2,7 +2,6 @@
"""Model loader for InvokeAI."""
import hashlib
import shutil
from abc import ABC, abstractmethod
from dataclasses import dataclass
from pathlib import Path
@ -11,9 +10,9 @@ from typing import List, Optional, Union
import torch
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.util import InvokeAILogger, choose_precision, choose_torch_device, directory_size
from invokeai.backend.util import InvokeAILogger, choose_precision, choose_torch_device
from .cache import GIG, CacheStats, ModelCache, ModelLocker
from .cache import CacheStats, ModelCache, ModelLocker
from .config import BaseModelType, ModelConfigBase, ModelType, SubModelType
from .download import DownloadEventHandler
from .install import ModelInstall, ModelInstallBase
@ -236,7 +235,6 @@ class ModelLoad(ModelLoadBase):
model_config=model_config,
output_path=dst_convert_path,
)
self._trim_model_convert_cache() # keeps cache size under control
model_context = self._cache.get_model(
model_path=model_path,
@ -276,34 +274,6 @@ class ModelLoad(ModelLoadBase):
def _get_model_convert_cache_path(self, model_path):
return self.resolve_model_path(Path(".cache") / hashlib.md5(str(model_path).encode()).hexdigest())
def _trim_model_convert_cache(self):
max_cache_size = self._app_config.conversion_cache_size * GIG
cache_path = self.resolve_model_path(Path(".cache"))
current_size = directory_size(cache_path)
if current_size <= max_cache_size:
return
self.logger.debug("Convert cache has gotten too large. Trimming.")
# For this to work, we make the assumption that the directory contains
# either a 'unet/config.json' file, or a 'config.json' file at top level
def by_atime(path: Path) -> float:
for config in ["unet/config.json", "config.json"]:
sentinel = path / sentinel
if sentinel.exists():
return sentinel.stat().m_atime
return 0.0
# sort by last access time - least accessed files will be at the end
lru_models = sorted(cache_dir.iterdir(), key=by_atime, reverse=True)
while current_size > max_cache_size:
next_victim = lru_models.pop()
victim_size = directory_size(next_victim)
self.logger.debug(f"Removing cached converted model {next_victim} to free {victim_size / GIG} GB")
shutil.rmtree(next_victim)
current_size -= victim_size
def _get_model_path(
self, model_config: ModelConfigBase, submodel_type: Optional[SubModelType] = None
) -> (Path, bool):

View File

@ -1,6 +1,7 @@
import inspect
import json
import os
import shutil
import sys
import typing
from abc import ABCMeta, abstractmethod
@ -18,6 +19,9 @@ from onnx import numpy_helper
from onnxruntime import InferenceSession, SessionOptions, get_available_providers
from picklescan.scanner import scan_file_path
from invokeai.backend.util import GIG, directory_size
from invokeai.backend.util.logging import InvokeAILogger
from ..config import ( # noqa F401
BaseModelType,
ModelConfigBase,
@ -598,3 +602,34 @@ class IAIOnnxRuntimeModel:
# TODO: session options
return cls(model_path, provider=provider)
def trim_model_convert_cache(cache_path: Path, max_cache_size: int):
current_size = directory_size(cache_path)
logger = InvokeAILogger.getLogger()
if current_size <= max_cache_size:
return
logger.debug(
"Convert cache has gotten too large {(current_size / GIG):4.2f} > {(max_cache_size / GIG):4.2f}G.. Trimming."
)
# For this to work, we make the assumption that the directory contains
# either a 'unet/config.json' file, or a 'config.json' file at top level
def by_atime(path: Path) -> float:
for config in ["unet/config.json", "config.json"]:
sentinel = path / config
if sentinel.exists():
return sentinel.stat().st_atime
return 0.0
# sort by last access time - least accessed files will be at the end
lru_models = sorted(cache_path.iterdir(), key=by_atime, reverse=True)
logger.debug(f"cached models in descending atime order: {lru_models}")
while current_size > max_cache_size and len(lru_models) > 0:
next_victim = lru_models.pop()
victim_size = directory_size(next_victim)
logger.debug(f"Removing cached converted model {next_victim} to free {victim_size / GIG} GB")
shutil.rmtree(next_victim)
current_size -= victim_size

View File

@ -10,6 +10,7 @@ from invokeai.app.services.config import InvokeAIAppConfig
from ..config import ControlNetCheckpointConfig, ControlNetDiffusersConfig
from .base import (
GIG,
BaseModelType,
EmptyConfigLoader,
InvalidModelException,
@ -127,6 +128,7 @@ class ControlNetModel(ModelBase):
def _convert_controlnet_ckpt_and_cache(
model_config: ControlNetCheckpointConfig,
output_path: str,
max_cache_size: int,
) -> str:
"""
Convert the controlnet from checkpoint format to diffusers format,
@ -142,6 +144,11 @@ def _convert_controlnet_ckpt_and_cache(
if output_path.exists():
return output_path
# make sufficient size in the cache folder
size_needed = weights.stat().st_size
max_cache_size = app_config.conversion_cache_size * GIG
trim_model_convert_cache(output_path.parent, max_cache_size - size_needed)
# to avoid circular import errors
from ..convert_ckpt_to_diffusers import convert_controlnet_to_diffusers

View File

@ -11,6 +11,7 @@ from pydantic import Field
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from ..cache import GIG
from ..config import MainCheckpointConfig, MainDiffusersConfig, SilenceWarnings
from .base import (
BaseModelType,
@ -22,6 +23,7 @@ from .base import (
ModelVariantType,
classproperty,
read_checkpoint_meta,
trim_model_convert_cache,
)
@ -241,11 +243,16 @@ def _convert_ckpt_and_cache(
output_path = Path(output_path)
variant = model_config.variant
pipeline_class = StableDiffusionInpaintPipeline if variant == "inpaint" else StableDiffusionPipeline
max_cache_size = app_config.conversion_cache_size * GIG
# return cached version if it exists
if output_path.exists():
return output_path
# make sufficient size in the cache folder
size_needed = weights.stat().st_size
trim_model_convert_cache(output_path.parent, max_cache_size - size_needed)
# to avoid circular import errors
from ...util.devices import choose_torch_device, torch_dtype
from ..convert_ckpt_to_diffusers import convert_ckpt_to_diffusers

View File

@ -72,8 +72,8 @@ class ONNXStableDiffusion1Model(DiffusersModel):
cls,
model_path: str,
output_path: str,
config: ModelConfigBase,
base_model: BaseModelType,
# config: ModelConfigBase, # not used?
# base_model: BaseModelType, # not used?
) -> str:
return model_path

View File

@ -11,6 +11,7 @@ from invokeai.app.services.config import InvokeAIAppConfig
from ..config import VaeCheckpointConfig, VaeDiffusersConfig
from .base import (
GIG,
BaseModelType,
EmptyConfigLoader,
InvalidModelException,
@ -23,6 +24,7 @@ from .base import (
calc_model_size_by_data,
calc_model_size_by_fs,
classproperty,
trim_model_convert_cache,
)
@ -116,6 +118,7 @@ class VaeModel(ModelBase):
def _convert_vae_ckpt_and_cache(
model_config: ModelConfigBase,
output_path: str,
max_cache_size: int,
) -> str:
"""
Convert the VAE indicated in mconfig into a diffusers AutoencoderKL
@ -145,6 +148,11 @@ def _convert_vae_ckpt_and_cache(
if output_path.exists():
return output_path
# make sufficient size in the cache folder
size_needed = weights_path.stat().st_size
max_cache_size = app_config.conversion_cache_size * GIG
trim_model_convert_cache(output_path.parent, max_cache_size - size_needed)
base_model = model_config.base_model
if base_model in {BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2}:
from .stable_diffusion import _select_ckpt_config

View File

@ -13,6 +13,7 @@ from .devices import ( # noqa: F401
)
from .logging import InvokeAILogger # noqa: F401
from .util import ( # noqa: F401
GIG,
Chdir,
ask_user,
directory_size,

View File

@ -21,6 +21,9 @@ import invokeai.backend.util.logging as logger
from .devices import torch_dtype
# actual size of a gig
GIG = 1073741824
def log_txt_as_img(wh, xc, size=10):
# wh a tuple of (width, height)

View File

@ -21,17 +21,13 @@ from typing import Dict, List, Optional, Tuple
import npyscreen
import omegaconf
import torch
from huggingface_hub import HfFolder
from npyscreen import widget
from pydantic import BaseModel
from tqdm import tqdm
import invokeai.configs as configs
from invokeai.app.services.config import InvokeAIAppConfig
# from invokeai.backend.install.model_install_backend import InstallSelections, ModelInstall, SchedulerPredictionType
from invokeai.backend.install.install_helper import InstallHelper
from invokeai.backend.model_manager import BaseModelType, ModelInstall, ModelInstallJob, ModelType
from invokeai.backend.model_manager.install import ModelSourceMetadata
from invokeai.backend.util import choose_precision, choose_torch_device
from invokeai.backend.util.logging import InvokeAILogger
from invokeai.frontend.install.widgets import (
@ -58,12 +54,6 @@ NOPRINT_TRANS_TABLE = {i: None for i in range(0, sys.maxunicode + 1) if not chr(
# maximum number of installed models we can display before overflowing vertically
MAX_OTHER_MODELS = 72
# name of the starter models file
INITIAL_MODELS = "INITIAL_MODELS.yaml"
INITIAL_MODELS_CONFIG = omegaconf.OmegaConf.load(Path(configs.__path__[0]) / INITIAL_MODELS)
ACCESS_TOKEN = HfFolder.get_token()
class UnifiedModelInfo(BaseModel):
name: Optional[str] = None
@ -102,8 +92,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
super().__init__(parentApp=parentApp, name=name, *args, **keywords)
def create(self):
self.installer = self.parentApp.installer
self.initialize_model_lists()
self.installer = self.parentApp.install_helper.installer
self.model_labels = self._get_model_labels()
self.keypress_timeout = 10
self.counter = 0
@ -374,52 +363,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
self.__class__.current_tab = selected_tab # for persistence
self.display()
def initialize_model_lists(self):
"""
Initialize our model slots.
Set up the following:
self.installed_models -- list of installed model keys
self.starter_models -- list of starter model keys from INITIAL_MODELS
self.all_models -- dict of key => UnifiedModelInfo
Each of these is a dict of key=>ModelConfigBase.
"""
installed_models = list()
starter_models = list()
all_models = dict()
# previously-installed models
for model in self.installer.store.all_models():
info = UnifiedModelInfo.parse_obj(model.dict())
info.installed = True
key = f"{model.base_model.value}/{model.model_type.value}/{model.name}"
all_models[key] = info
installed_models.append(key)
for key in INITIAL_MODELS_CONFIG.keys():
if key in all_models:
# we want to preserve the description
description = all_models[key].description or INITIAL_MODELS_CONFIG[key].get("description")
all_models[key].description = description
else:
base_model, model_type, model_name = key.split("/")
info = UnifiedModelInfo(
name=model_name,
model_type=model_type,
base_model=base_model,
source=INITIAL_MODELS_CONFIG[key].source,
description=INITIAL_MODELS_CONFIG[key].get("description"),
recommended=INITIAL_MODELS_CONFIG[key].get("recommended", False),
default=INITIAL_MODELS_CONFIG[key].get("default", False),
)
all_models[key] = info
starter_models.append(key)
self.installed_models = installed_models
self.starter_models = starter_models
self.all_models = all_models
def _get_model_labels(self) -> dict[str, str]:
"""Return a list of trimmed labels for all models."""
window_width, window_height = get_terminal_size()
@ -459,6 +402,18 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
else:
return True
@property
def all_models(self) -> Dict[str, UnifiedModelInfo]:
return self.parentApp.install_helper.all_models
@property
def starter_models(self) -> List[str]:
return self.parentApp.install_helper._starter_models
@property
def installed_models(self) -> List[str]:
return self.parentApp.install_helper._installed_models
def on_back(self):
self.parentApp.switchFormPrevious()
self.editing = False
@ -514,12 +469,12 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
class AddModelApplication(npyscreen.NPSAppManaged):
def __init__(self, opt: Namespace, installer: ModelInstall):
def __init__(self, opt: Namespace, install_helper: InstallHelper):
super().__init__()
self.program_opts = opt
self.user_cancelled = False
self.install_selections = InstallSelections()
self.installer = installer
self.install_helper = install_helper
def onStart(self):
npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
@ -540,66 +495,13 @@ def list_models(installer: ModelInstall, model_type: ModelType):
print(f"{model.name:40}{model.base_model.value:14}{path}")
class TqdmProgress(object):
_bars: Dict[int, tqdm] # the tqdm object
_last: Dict[int, int] # last bytes downloaded
def __init__(self):
self._bars = dict()
self._last = dict()
def job_update(self, job: ModelInstallJob):
job_id = job.id
if job.status == "running":
if job_id not in self._bars:
dest = Path(job.destination).name
self._bars[job_id] = tqdm(
desc=dest,
initial=0,
total=job.total_bytes,
unit="iB",
unit_scale=True,
)
self._last[job_id] = 0
self._bars[job_id].update(job.bytes - self._last[job_id])
self._last[job_id] = job.bytes
def add_or_delete(installer: ModelInstall, selections: InstallSelections):
for model in selections.install_models:
metadata = ModelSourceMetadata(description=model.description, name=model.name)
installer.install(
model.source,
variant="fp16" if config.precision == "float16" else None,
access_token=ACCESS_TOKEN, # this is a global,
metadata=metadata,
)
for model in selections.remove_models:
parts = model.split("/")
if len(parts) == 1:
base_model, model_type, model_name = (None, None, model)
else:
base_model, model_type, model_name = parts
matches = installer.store.search_by_name(base_model=base_model, model_type=model_type, model_name=model_name)
if len(matches) > 1:
print(f"{model} is ambiguous. Please use model_type:model_name (e.g. main:my_model) to disambiguate.")
elif not matches:
print(f"{model}: unknown model")
else:
for m in matches:
print(f"Deleting {m.model_type}:{m.name}")
installer.conditionally_delete(m.key)
installer.wait_for_installs()
# --------------------------------------------------------
def select_and_download_models(opt: Namespace):
"""Prompt user for install/delete selections and execute."""
precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device()))
config.precision = precision
installer = ModelInstall(config=config, event_handlers=[TqdmProgress().job_update])
install_helper = InstallHelper(config)
installer = install_helper.installer
if opt.list_models:
list_models(installer, opt.list_models)
@ -608,33 +510,31 @@ def select_and_download_models(opt: Namespace):
selections = InstallSelections(
install_models=[UnifiedModelInfo(source=x) for x in (opt.add or [])], remove_models=opt.delete or []
)
add_or_delete(installer, selections)
install_helper.add_or_delete(selections)
elif opt.default_only:
selections = InstallSelections(install_models=installer.default_model())
add_or_delete(installer, selections)
selections = InstallSelections(install_models=[install_helper.default_model()])
install_helper.add_or_delete(selections)
elif opt.yes_to_all:
selections = InstallSelections(install_models=installer.recommended_models())
add_or_delete(installer, selections)
selections = InstallSelections(install_models=install_helper.recommended_models())
install_helper.add_or_delete(selections)
# this is where the TUI is called
else:
# needed to support the probe() method running under a subprocess
torch.multiprocessing.set_start_method("spawn")
if not set_min_terminal_size(MIN_COLS, MIN_LINES):
raise WindowTooSmallException(
"Could not increase terminal size. Try running again with a larger window or smaller font size."
)
installApp = AddModelApplication(opt, installer)
installApp = AddModelApplication(opt, install_helper)
try:
installApp.run()
except KeyboardInterrupt as e:
print("Aborted...")
sys.exit(-1)
add_or_delete(installer, installApp.install_selections)
install_helper.add_or_delete(installApp.install_selections)
# -------------------------------------
@ -675,14 +575,6 @@ def main():
choices=[x.value for x in ModelType],
help="list installed models",
)
parser.add_argument(
"--config_file",
"-c",
dest="config_file",
type=str,
default=None,
help="path to configuration file to create",
)
parser.add_argument(
"--root_dir",
dest="root",