Merge branch 'main' of github.com:invoke-ai/InvokeAI

Lincoln Stein 2023-07-30 09:19:14 -04:00
commit 7d7a9273ed
12 changed files with 129 additions and 101 deletions

View File

@@ -149,7 +149,7 @@ class Installer:
         return venv_dir

     def install(
-        self, root: str = "~/invokeai-3", version: str = "latest", yes_to_all=False, find_links: Path = None
+        self, root: str = "~/invokeai", version: str = "latest", yes_to_all=False, find_links: Path = None
     ) -> None:
         """
         Install the InvokeAI application into the given runtime path
@@ -168,7 +168,8 @@ class Installer:
         messages.welcome()

-        self.dest = Path(root).expanduser().resolve() if yes_to_all else messages.dest_path(root)
+        default_path = os.environ.get("INVOKEAI_ROOT") or Path(root).expanduser().resolve()
+        self.dest = default_path if yes_to_all else messages.dest_path(root)

         # create the venv for the app
         self.venv = self.app_venv()

View File

@@ -3,6 +3,7 @@ InvokeAI Installer
 """

 import argparse
+import os
 from pathlib import Path
 from installer import Installer

@@ -15,7 +16,7 @@ if __name__ == "__main__":
         dest="root",
         type=str,
         help="Destination path for installation",
-        default="~/invokeai",
+        default=os.environ.get("INVOKEAI_ROOT") or "~/invokeai",
     )
     parser.add_argument(
         "-y",

View File

@@ -171,7 +171,6 @@ from pydantic import BaseSettings, Field, parse_obj_as
 from typing import ClassVar, Dict, List, Set, Literal, Union, get_origin, get_type_hints, get_args

 INIT_FILE = Path("invokeai.yaml")
-MODEL_CORE = Path("models/core")
 DB_FILE = Path("invokeai.db")
 LEGACY_INIT_FILE = Path("invokeai.init")

@@ -357,7 +356,7 @@ def _find_root() -> Path:
     venv = Path(os.environ.get("VIRTUAL_ENV") or ".")
     if os.environ.get("INVOKEAI_ROOT"):
         root = Path(os.environ.get("INVOKEAI_ROOT")).resolve()
-    elif any([(venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE, MODEL_CORE]]):
+    elif any([(venv.parent / x).exists() for x in [INIT_FILE, LEGACY_INIT_FILE]]):
         root = (venv.parent).resolve()
     else:
         root = Path("~/invokeai").expanduser().resolve()

View File

@@ -181,7 +181,7 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
 def download_conversion_models():
-    target_dir = config.root_path / "models/core/convert"
+    target_dir = config.models_path / "core/convert"
     kwargs = dict()  # for future use
     try:
         logger.info("Downloading core tokenizers and text encoders")

View File

@@ -128,7 +128,9 @@ class ModelInstall(object):
             model_dict[key] = ModelLoadInfo(**value)

         # supplement with entries in models.yaml
-        installed_models = self.mgr.list_models()
+        installed_models = [x for x in self.mgr.list_models()]
+        # suppresses autoloaded models
+        # installed_models = [x for x in self.mgr.list_models() if not self._is_autoloaded(x)]

         for md in installed_models:
             base = md["base_model"]
@@ -147,6 +149,17 @@ class ModelInstall(object):
             )
         return {x: model_dict[x] for x in sorted(model_dict.keys(), key=lambda y: model_dict[y].name.lower())}

+    def _is_autoloaded(self, model_info: dict) -> bool:
+        path = model_info.get("path")
+        if not path:
+            return False
+        for autodir in ["autoimport_dir", "lora_dir", "embedding_dir", "controlnet_dir"]:
+            if autodir_path := getattr(self.config, autodir):
+                autodir_path = self.config.root_path / autodir_path
+                if Path(path).is_relative_to(autodir_path):
+                    return True
+        return False
+
     def list_models(self, model_type):
         installed = self.mgr.list_models(model_type=model_type)
         print(f"Installed models of type `{model_type}`:")
@@ -273,6 +286,7 @@ class ModelInstall(object):
                 logger.error(f"Unable to download {url}. Skipping.")
         info = ModelProbe().heuristic_probe(location)
         dest = self.config.models_path / info.base_type.value / info.model_type.value / location.name
+        dest.parent.mkdir(parents=True, exist_ok=True)
         models_path = shutil.move(location, dest)

         # staged version will be garbage-collected at this time
@@ -346,7 +360,7 @@ class ModelInstall(object):
         if key in self.datasets:
             description = self.datasets[key].get("description") or description

-        rel_path = self.relative_to_root(path)
+        rel_path = self.relative_to_root(path, self.config.models_path)

         attributes = dict(
             path=str(rel_path),
@@ -386,8 +400,8 @@ class ModelInstall(object):
             attributes.update(dict(config=str(legacy_conf)))
         return attributes

-    def relative_to_root(self, path: Path) -> Path:
-        root = self.config.root_path
+    def relative_to_root(self, path: Path, root: Optional[Path] = None) -> Path:
+        root = root or self.config.root_path
         if path.is_relative_to(root):
             return path.relative_to(root)
         else:
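
The reworked relative_to_root() keeps managed models portable by storing paths relative to whichever root applies. A standalone sketch of that behavior (requires Python 3.9+ for Path.is_relative_to; names are illustrative):

from pathlib import Path

def relative_to_root(path: Path, root: Path) -> Path:
    # Paths under the root are stored relative to it; anything else
    # (e.g. a user-supplied external directory) stays absolute.
    return path.relative_to(root) if path.is_relative_to(root) else path

models = Path("/data/invokeai/models")
print(relative_to_root(Path("/data/invokeai/models/sd-1/main/foo"), models))  # sd-1/main/foo
print(relative_to_root(Path("/mnt/external/bar"), models))                    # /mnt/external/bar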

View File

@@ -63,7 +63,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionS
 from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer

 from invokeai.backend.util.logging import InvokeAILogger
-from invokeai.app.services.config import InvokeAIAppConfig, MODEL_CORE
+from invokeai.app.services.config import InvokeAIAppConfig
 from picklescan.scanner import scan_file_path
 from .models import BaseModelType, ModelVariantType

@@ -81,7 +81,7 @@ if is_accelerate_available():
     from accelerate.utils import set_module_tensor_to_device

 logger = InvokeAILogger.getLogger(__name__)
-CONVERT_MODEL_ROOT = InvokeAIAppConfig.get_config().root_path / MODEL_CORE / "convert"
+CONVERT_MODEL_ROOT = InvokeAIAppConfig.get_config().models_path / "core/convert"

 def shave_segments(path, n_shave_prefix_segments=1):
@@ -1070,7 +1070,7 @@ def convert_controlnet_checkpoint(
     extract_ema,
     use_linear_projection=None,
     cross_attention_dim=None,
-    precision: torch.dtype = torch.float32,
+    precision: Optional[torch.dtype] = None,
 ):
     ctrlnet_config = create_unet_diffusers_config(original_config, image_size=image_size, controlnet=True)
     ctrlnet_config["upcast_attention"] = upcast_attention
@@ -1111,7 +1111,6 @@ def convert_controlnet_checkpoint(
     return controlnet.to(precision)

-# TO DO - PASS PRECISION
 def download_from_original_stable_diffusion_ckpt(
     checkpoint_path: str,
     model_version: BaseModelType,
@@ -1121,7 +1120,7 @@ def download_from_original_stable_diffusion_ckpt(
     prediction_type: str = None,
     model_type: str = None,
     extract_ema: bool = False,
-    precision: torch.dtype = torch.float32,
+    precision: Optional[torch.dtype] = None,
     scheduler_type: str = "pndm",
     num_in_channels: Optional[int] = None,
     upcast_attention: Optional[bool] = None,
@@ -1194,6 +1193,8 @@ def download_from_original_stable_diffusion_ckpt(
             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer)
             to use. If this parameter is `None`, the function will load a new instance of [CLIPTokenizer] by itself, if
             needed.
+        precision (`torch.dtype`, *optional*, defaults to `None`):
+            If not provided, the precision will be set to the precision of the original file.
     return: A StableDiffusionPipeline object representing the passed-in `.ckpt`/`.safetensors` file.
     """
@@ -1252,6 +1253,10 @@ def download_from_original_stable_diffusion_ckpt(
     logger.debug(f"model_type = {model_type}; original_config_file = {original_config_file}")

+    precision_probing_key = "model.diffusion_model.input_blocks.0.0.bias"
+    logger.debug(f"original checkpoint precision == {checkpoint[precision_probing_key].dtype}")
+    precision = precision or checkpoint[precision_probing_key].dtype
+
     if original_config_file is None:
         key_name_v2_1 = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
         key_name_sd_xl_base = "conditioner.embedders.1.model.transformer.resblocks.9.mlp.c_proj.bias"
@@ -1279,9 +1284,12 @@ def download_from_original_stable_diffusion_ckpt(
         original_config_file = BytesIO(requests.get(config_url).content)

     original_config = OmegaConf.load(original_config_file)
+
+    if original_config["model"]["params"].get("use_ema") is not None:
+        extract_ema = original_config["model"]["params"]["use_ema"]
     if (
         model_version == BaseModelType.StableDiffusion2
-        and original_config["model"]["params"]["parameterization"] == "v"
+        and original_config["model"]["params"].get("parameterization") == "v"
     ):
         prediction_type = "v_prediction"
         upcast_attention = True
@@ -1447,7 +1455,7 @@ def download_from_original_stable_diffusion_ckpt(
         if controlnet:
             pipe = pipeline_class(
                 vae=vae.to(precision),
-                text_encoder=text_model,
+                text_encoder=text_model.to(precision),
                 tokenizer=tokenizer,
                 unet=unet.to(precision),
                 scheduler=scheduler,
@@ -1459,7 +1467,7 @@ def download_from_original_stable_diffusion_ckpt(
         else:
             pipe = pipeline_class(
                 vae=vae.to(precision),
-                text_encoder=text_model,
+                text_encoder=text_model.to(precision),
                 tokenizer=tokenizer,
                 unet=unet.to(precision),
                 scheduler=scheduler,
@@ -1484,8 +1492,8 @@ def download_from_original_stable_diffusion_ckpt(
                 image_noising_scheduler=image_noising_scheduler,
                 # regular denoising components
                 tokenizer=tokenizer,
-                text_encoder=text_model,
-                unet=unet,
+                text_encoder=text_model.to(precision),
+                unet=unet.to(precision),
                 scheduler=scheduler,
                 # vae
                 vae=vae,
@@ -1560,7 +1568,7 @@ def download_from_original_stable_diffusion_ckpt(
         if controlnet:
             pipe = pipeline_class(
                 vae=vae.to(precision),
-                text_encoder=text_model,
+                text_encoder=text_model.to(precision),
                 tokenizer=tokenizer,
                 unet=unet.to(precision),
                 controlnet=controlnet,
@@ -1571,7 +1579,7 @@ def download_from_original_stable_diffusion_ckpt(
         else:
             pipe = pipeline_class(
                 vae=vae.to(precision),
-                text_encoder=text_model,
+                text_encoder=text_model.to(precision),
                 tokenizer=tokenizer,
                 unet=unet.to(precision),
                 scheduler=scheduler,
@@ -1594,9 +1602,9 @@ def download_from_original_stable_diffusion_ckpt(
         pipe = StableDiffusionXLPipeline(
             vae=vae.to(precision),
-            text_encoder=text_encoder,
+            text_encoder=text_encoder.to(precision),
             tokenizer=tokenizer,
-            text_encoder_2=text_encoder_2,
+            text_encoder_2=text_encoder_2.to(precision),
             tokenizer_2=tokenizer_2,
             unet=unet.to(precision),
             scheduler=scheduler,
@@ -1639,7 +1647,7 @@ def download_controlnet_from_original_ckpt(
     original_config_file: str,
     image_size: int = 512,
     extract_ema: bool = False,
-    precision: torch.dtype = torch.float32,
+    precision: Optional[torch.dtype] = None,
     num_in_channels: Optional[int] = None,
     upcast_attention: Optional[bool] = None,
     device: str = None,
@@ -1680,6 +1688,12 @@ def download_controlnet_from_original_ckpt(
     while "state_dict" in checkpoint:
         checkpoint = checkpoint["state_dict"]

+    # use original precision
+    precision_probing_key = "input_blocks.0.0.bias"
+    ckpt_precision = checkpoint[precision_probing_key].dtype
+    logger.debug(f"original controlnet precision = {ckpt_precision}")
+    precision = precision or ckpt_precision
+
     original_config = OmegaConf.load(original_config_file)

     if num_in_channels is not None:
@@ -1699,7 +1713,7 @@ def download_controlnet_from_original_ckpt(
         cross_attention_dim=cross_attention_dim,
     )

-    return controlnet
+    return controlnet.to(precision)

 def convert_ldm_vae_to_diffusers(checkpoint, vae_config: DictConfig, image_size: int) -> AutoencoderKL:
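
Both converters now default to the checkpoint's own precision rather than forcing float32. A minimal sketch of the probing rule, assuming only what the hunks above show:

import torch

def probe_precision(checkpoint: dict, key: str, requested=None) -> torch.dtype:
    # An explicitly requested dtype wins; otherwise fall back to the
    # dtype recorded in the original checkpoint tensor.
    return requested or checkpoint[key].dtype

ckpt = {"input_blocks.0.0.bias": torch.zeros(4, dtype=torch.float16)}
print(probe_precision(ckpt, "input_blocks.0.0.bias"))                 # torch.float16
print(probe_precision(ckpt, "input_blocks.0.0.bias", torch.float32))  # torch.float32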

View File

@@ -187,7 +187,9 @@ class ModelCache(object):
         # TODO: lock for no copies on simultaneous calls?
         cache_entry = self._cached_models.get(key, None)
         if cache_entry is None:
-            self.logger.info(f"Loading model {model_path}, type {base_model}:{model_type}:{submodel}")
+            self.logger.info(
+                f"Loading model {model_path}, type {base_model.value}:{model_type.value}:{submodel.value if submodel else ''}"
+            )

             # this will remove older cached models until
             # there is sufficient room to load the requested model
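
The old log line interpolated the enums directly; the new one logs each .value and guards a possibly-None submodel. A toy illustration (this BaseModelType is a stand-in; how f-strings render str/Enum mixins also changed across Python versions, one more reason to be explicit):

from enum import Enum

class BaseModelType(str, Enum):
    StableDiffusion1 = "sd-1"

bm = BaseModelType.StableDiffusion1
print(f"{bm}")        # version-dependent; may render as BaseModelType.StableDiffusion1
print(f"{bm.value}")  # always "sd-1"
submodel = None
print(f"{submodel.value if submodel else ''}")  # avoids AttributeError on None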

View File

@@ -423,7 +423,7 @@ class ModelManager(object):
         return (model_name, base_model, model_type)

     def _get_model_cache_path(self, model_path):
-        return self.app_config.models_path / ".cache" / hashlib.md5(str(model_path).encode()).hexdigest()
+        return self.resolve_model_path(Path(".cache") / hashlib.md5(str(model_path).encode()).hexdigest())

     @classmethod
     def initialize_model_config(cls, config_path: Path):
@@ -456,7 +456,7 @@ class ModelManager(object):
             raise ModelNotFoundException(f"Model not found - {model_key}")

         model_config = self.models[model_key]
-        model_path = self.app_config.root_path / model_config.path
+        model_path = self.resolve_model_path(model_config.path)

         if not model_path.exists():
             if model_class.save_to_config:
@@ -586,7 +586,7 @@ class ModelManager(object):
                 # expose paths as absolute to help web UI
                 if path := model_dict.get("path"):
-                    model_dict["path"] = str(self.app_config.root_path / path)
+                    model_dict["path"] = str(self.resolve_model_path(path))

                 models.append(model_dict)

         return models
@@ -623,7 +623,7 @@ class ModelManager(object):
             self.cache.uncache_model(cache_id)

         # if model inside invoke models folder - delete files
-        model_path = self.app_config.root_path / model_cfg.path
+        model_path = self.resolve_model_path(model_cfg.path)
         cache_path = self._get_model_cache_path(model_path)
         if cache_path.exists():
             rmtree(str(cache_path))
@@ -654,10 +654,9 @@ class ModelManager(object):
         The returned dict has the same format as the dict returned by
         model_info().
         """
-        # relativize paths as they go in - this makes it easier to move the root directory around
+        # relativize paths as they go in - this makes it easier to move the models directory around
         if path := model_attributes.get("path"):
-            if Path(path).is_relative_to(self.app_config.root_path):
-                model_attributes["path"] = str(Path(path).relative_to(self.app_config.root_path))
+            model_attributes["path"] = str(self.relative_model_path(Path(path)))

         model_class = MODEL_CLASSES[base_model][model_type]
         model_config = model_class.create_config(**model_attributes)
@@ -715,7 +714,7 @@ class ModelManager(object):
         if not model_cfg:
             raise ModelNotFoundException(f"Unknown model: {model_key}")

-        old_path = self.app_config.root_path / model_cfg.path
+        old_path = self.resolve_model_path(model_cfg.path)
         new_name = new_name or model_name
         new_base = new_base or base_model
         new_key = self.create_key(new_name, new_base, model_type)
@@ -724,15 +723,15 @@ class ModelManager(object):
         # if this is a model file/directory that we manage ourselves, we need to move it
         if old_path.is_relative_to(self.app_config.models_path):
-            new_path = (
-                self.app_config.root_path
-                / "models"
-                / BaseModelType(new_base).value
-                / ModelType(model_type).value
-                / new_name
-            )
+            new_path = self.resolve_model_path(
+                Path(
+                    BaseModelType(new_base).value,
+                    ModelType(model_type).value,
+                    new_name,
+                )
+            )
             move(old_path, new_path)
-            model_cfg.path = str(new_path.relative_to(self.app_config.root_path))
+            model_cfg.path = str(new_path.relative_to(self.app_config.models_path))

         # clean up caches
         old_model_cache = self._get_model_cache_path(old_path)
@@ -782,7 +781,7 @@ class ModelManager(object):
                 **submodel,
             )
             checkpoint_path = self.app_config.root_path / info["path"]
-            old_diffusers_path = self.app_config.models_path / model.location
+            old_diffusers_path = self.resolve_model_path(model.location)
             new_diffusers_path = (
                 dest_directory or self.app_config.models_path / base_model.value / model_type.value
             ) / model_name
@@ -795,7 +794,7 @@ class ModelManager(object):
             info["path"] = (
                 str(new_diffusers_path)
                 if dest_directory
-                else str(new_diffusers_path.relative_to(self.app_config.root_path))
+                else str(new_diffusers_path.relative_to(self.app_config.models_path))
             )
             info.pop("config")
@@ -810,6 +809,15 @@ class ModelManager(object):
         return result

+    def resolve_model_path(self, path: Union[Path, str]) -> Path:
+        """Return an absolute path into the configured models_path."""
+        return self.app_config.models_path / path
+
+    def relative_model_path(self, model_path: Path) -> Path:
+        if model_path.is_relative_to(self.app_config.models_path):
+            model_path = model_path.relative_to(self.app_config.models_path)
+        return model_path
+
     def search_models(self, search_folder):
         self.logger.info(f"Finding Models In: {search_folder}")
         models_folder_ckpt = Path(search_folder).glob("**/*.ckpt")
@@ -883,10 +891,17 @@ class ModelManager(object):
         new_models_found = False

         self.logger.info(f"Scanning {self.app_config.models_path} for new models")
-        with Chdir(self.app_config.root_path):
+        with Chdir(self.app_config.models_path):
             for model_key, model_config in list(self.models.items()):
                 model_name, cur_base_model, cur_model_type = self.parse_key(model_key)
-                model_path = self.app_config.root_path.absolute() / model_config.path
+
+                # Patch for relative path bug in older models.yaml - paths should not
+                # be starting with a hard-coded 'models'. This will also fix up
+                # models.yaml when committed.
+                if model_config.path.startswith("models"):
+                    model_config.path = str(Path(*Path(model_config.path).parts[1:]))
+
+                model_path = self.resolve_model_path(model_config.path).absolute()
                 if not model_path.exists():
                     model_class = MODEL_CLASSES[cur_base_model][cur_model_type]
                     if model_class.save_to_config:
@@ -905,7 +920,7 @@ class ModelManager(object):
                 if model_type is not None and cur_model_type != model_type:
                     continue
                 model_class = MODEL_CLASSES[cur_base_model][cur_model_type]
-                models_dir = self.app_config.models_path / cur_base_model.value / cur_model_type.value
+                models_dir = self.resolve_model_path(Path(cur_base_model.value, cur_model_type.value))

                 if not models_dir.exists():
                     continue  # TODO: or create all folders?
@@ -919,9 +934,7 @@ class ModelManager(object):
                         if model_key in self.models:
                             raise DuplicateModelException(f"Model with key {model_key} added twice")

-                        if model_path.is_relative_to(self.app_config.root_path):
-                            model_path = model_path.relative_to(self.app_config.root_path)
+                        model_path = self.relative_model_path(model_path)
                         model_config: ModelConfigBase = model_class.probe_config(str(model_path))
                         self.models[model_key] = model_config
                         new_models_found = True
@@ -932,12 +945,11 @@ class ModelManager(object):
                     except NotImplementedError as e:
                         self.logger.warning(e)

-        imported_models = self.autoimport()
+        imported_models = self.scan_autoimport_directory()
         if (new_models_found or imported_models) and self.config_path:
             self.commit()

-    def autoimport(self) -> Dict[str, AddModelResult]:
+    def scan_autoimport_directory(self) -> Dict[str, AddModelResult]:
         """
         Scan the autoimport directory (if defined) and import new models, delete defunct models.
         """
@@ -971,7 +983,7 @@ class ModelManager(object):
         # LS: hacky
         # Patch in the SD VAE from core so that it is available for use by the UI
         try:
-            self.heuristic_import({config.root_path / "models/core/convert/sd-vae-ft-mse"})
+            self.heuristic_import({self.resolve_model_path("core/convert/sd-vae-ft-mse")})
         except:
             pass
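
resolve_model_path() and relative_model_path() are the pivot of this file's refactor: models.yaml entries are stored relative to models_path and made absolute on the way out. A toy round-trip sketch (module-level stand-ins for the new methods; the location is illustrative):

from pathlib import Path

MODELS_PATH = Path("/data/invokeai/models")

def resolve_model_path(path) -> Path:
    return MODELS_PATH / path

def relative_model_path(model_path: Path) -> Path:
    if model_path.is_relative_to(MODELS_PATH):
        model_path = model_path.relative_to(MODELS_PATH)
    return model_path

rel = Path("sd-1/main/some-model")
assert resolve_model_path(rel).is_absolute()
assert relative_model_path(resolve_model_path(rel)) == rel  # round-trips cleanly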

View File

@@ -17,6 +17,7 @@ from .base import (
     ModelNotFoundException,
 )
 from invokeai.app.services.config import InvokeAIAppConfig
+import invokeai.backend.util.logging as logger

 class ControlNetModelFormat(str, Enum):
@@ -66,7 +67,7 @@ class ControlNetModel(ModelBase):
         child_type: Optional[SubModelType] = None,
     ):
         if child_type is not None:
-            raise Exception("There is no child models in controlnet model")
+            raise Exception("There are no child models in controlnet model")

         model = None
         for variant in ["fp16", None]:
@@ -124,9 +125,7 @@ class ControlNetModel(ModelBase):
         return model_path

-@classmethod
 def _convert_controlnet_ckpt_and_cache(
-    cls,
     model_path: str,
     output_path: str,
     base_model: BaseModelType,
@@ -141,6 +140,7 @@ def _convert_controlnet_ckpt_and_cache(
     weights = app_config.root_path / model_path
     output_path = Path(output_path)

+    logger.info(f"Converting {weights} to diffusers format")
     # return cached version if it exists
     if output_path.exists():
         return output_path

View File

@@ -123,6 +123,7 @@ class StableDiffusion1Model(DiffusersModel):
             return _convert_ckpt_and_cache(
                 version=BaseModelType.StableDiffusion1,
                 model_config=config,
+                load_safety_checker=False,
                 output_path=output_path,
             )
         else:
@@ -259,7 +260,7 @@ def _convert_ckpt_and_cache(
     """
     app_config = InvokeAIAppConfig.get_config()

-    weights = app_config.root_path / model_config.path
+    weights = app_config.models_path / model_config.path
     config_file = app_config.root_path / model_config.config
     output_path = Path(output_path)

View File

@@ -112,7 +112,7 @@ def main():
     extras = get_extras()

-    print(f":crossed_fingers: Upgrading to [yellow]{tag if tag else release}[/yellow]")
+    print(f":crossed_fingers: Upgrading to [yellow]{tag or release or branch}[/yellow]")
     if release:
         cmd = f'pip install "invokeai{extras} @ {INVOKE_AI_SRC}/{release}.zip" --use-pep517 --upgrade'
     elif tag:

View File

@@ -58,6 +58,9 @@ logger = InvokeAILogger.getLogger()
 # from https://stackoverflow.com/questions/92438/stripping-non-printable-characters-from-a-string-in-python
 NOPRINT_TRANS_TABLE = {i: None for i in range(0, sys.maxunicode + 1) if not chr(i).isprintable()}

+# maximum number of installed models we can display before overflowing vertically
+MAX_OTHER_MODELS = 72
+
 def make_printable(s: str) -> str:
     """Replace non-printable characters in a string"""
@@ -102,7 +105,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
             SingleSelectColumns,
             values=[
                 "STARTER MODELS",
-                "MORE MODELS",
+                "MAIN MODELS",
                 "CONTROLNETS",
                 "LORA/LYCORIS",
                 "TEXTUAL INVERSION",
@@ -153,7 +156,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
             BufferBox,
             name="Log Messages",
             editable=False,
-            max_height=8,
+            max_height=15,
         )

         self.nextrely += 1
@@ -271,6 +274,11 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
                 )
             )

+        truncation = False
+        if len(model_labels) > MAX_OTHER_MODELS:
+            model_labels = model_labels[0:MAX_OTHER_MODELS]
+            truncation = True
+
         widgets.update(
             models_selected=self.add_widget_intelligent(
                 MultiSelectColumns,
@@ -289,6 +297,16 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
                 models=model_list,
             )

+        if truncation:
+            widgets.update(
+                warning_message=self.add_widget_intelligent(
+                    npyscreen.FixedText,
+                    value=f"Too many models to display (max={MAX_OTHER_MODELS}). Some are not displayed.",
+                    editable=False,
+                    color="CAUTION",
+                )
+            )
+
         self.nextrely += 1
         widgets.update(
             download_ids=self.add_widget_intelligent(
@@ -313,7 +331,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         widgets = self.add_model_widgets(
             model_type=model_type,
             window_width=window_width,
-            install_prompt=f"Additional {model_type.value.title()} models already installed.",
+            install_prompt=f"Installed {model_type.value.title()} models. Unchecked models in the InvokeAI root directory will be deleted. Enter URLs, paths or repo_ids to import.",
             **kwargs,
         )
@@ -399,7 +417,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         self.ok_button.hidden = True
         self.display()

-        # for communication with the subprocess
+        # TO DO: Spawn a worker thread, not a subprocess
         parent_conn, child_conn = Pipe()
         p = Process(
             target=process_and_execute,
@@ -414,7 +432,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         self.subprocess_connection = parent_conn
         self.subprocess = p
         app.install_selections = InstallSelections()
-        # process_and_execute(app.opt, app.install_selections)

     def on_back(self):
         self.parentApp.switchFormPrevious()
@@ -489,8 +506,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
         # rebuild the form, saving and restoring some of the fields that need to be preserved.
         saved_messages = self.monitor.entry_widget.values
-        # autoload_dir = str(config.root_path / self.pipeline_models['autoload_directory'].value)
-        # autoscan = self.pipeline_models['autoscan_on_startup'].value

         app.main_form = app.addForm(
             "MAIN",
@@ -544,12 +559,6 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
             if downloads := section.get("download_ids"):
                 selections.install_models.extend(downloads.value.split())

-        # load directory and whether to scan on startup
-        # if self.parentApp.autoload_pending:
-        #     selections.scan_directory = str(config.root_path / self.pipeline_models['autoload_directory'].value)
-        #     self.parentApp.autoload_pending = False
-        # selections.autoscan_on_startup = self.pipeline_models['autoscan_on_startup'].value

 class AddModelApplication(npyscreen.NPSAppManaged):
     def __init__(self, opt):
@@ -639,6 +648,10 @@ def process_and_execute(
     selections: InstallSelections,
     conn_out: Connection = None,
 ):
+    # need to reinitialize config in subprocess
+    config = InvokeAIAppConfig.get_config()
+    config.parse_args()
+
     # set up so that stderr is sent to conn_out
     if conn_out:
         translator = StderrToMessage(conn_out)
@@ -656,38 +669,11 @@ def process_and_execute(
             conn_out.close()

-def do_listings(opt) -> bool:
-    """List installed models of various sorts, and return
-    True if any were requested."""
-    model_manager = ModelManager(config.model_conf_path)
-    if opt.list_models == "diffusers":
-        print("Diffuser models:")
-        model_manager.print_models()
-    elif opt.list_models == "controlnets":
-        print("Installed Controlnet Models:")
-        cnm = model_manager.list_controlnet_models()
-        print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]), prefix=" "))
-    elif opt.list_models == "loras":
-        print("Installed LoRA/LyCORIS Models:")
-        cnm = model_manager.list_lora_models()
-        print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]), prefix=" "))
-    elif opt.list_models == "tis":
-        print("Installed Textual Inversion Embeddings:")
-        cnm = model_manager.list_ti_models()
-        print(textwrap.indent("\n".join([x for x in cnm if cnm[x]]), prefix=" "))
-    else:
-        return False
-    return True

 # --------------------------------------------------------
 def select_and_download_models(opt: Namespace):
     precision = "float32" if opt.full_precision else choose_precision(torch.device(choose_torch_device()))
     config.precision = precision
     helper = lambda x: ask_user_for_prediction_type(x)
-    # if do_listings(opt):
-    #     pass

     installer = ModelInstall(config, prediction_type_helper=helper)
     if opt.list_models:
         installer.list_models(opt.list_models)
@@ -706,8 +692,6 @@ def select_and_download_models(opt: Namespace):
     # needed to support the probe() method running under a subprocess
     torch.multiprocessing.set_start_method("spawn")

-    # the third argument is needed in the Windows 11 environment in
-    # order to launch and resize a console window running this program
     set_min_terminal_size(MIN_COLS, MIN_LINES)
     installApp = AddModelApplication(opt)
     try:
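
A closing note on the process_and_execute() change above: with the "spawn" start method used for the installer subprocess, the child re-imports the module and does not inherit the parent's parsed config, hence the explicit InvokeAIAppConfig.get_config() / parse_args() in the child. A stdlib-only sketch of the pitfall (toy names, not InvokeAI code):

import multiprocessing as mp

CONFIG = {"precision": "float32"}  # module-level state, set at import time

def worker():
    # Under the spawn start method the child re-imports this module,
    # so mutations made by the parent after import are not visible here.
    print("child sees:", CONFIG["precision"])

if __name__ == "__main__":
    mp.set_start_method("spawn")
    CONFIG["precision"] = "float16"  # the child will not see this
    p = mp.Process(target=worker)
    p.start()
    p.join()  # prints: child sees: float32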