diff --git a/installer/lib/installer.py b/installer/lib/installer.py
index aaf5779801..70ed4d4331 100644
--- a/installer/lib/installer.py
+++ b/installer/lib/installer.py
@@ -332,6 +332,7 @@ class InvokeAiInstance:
Configure the InvokeAI runtime directory
"""
+ auto_install = False
# set sys.argv to a consistent state
new_argv = [sys.argv[0]]
for i in range(1, len(sys.argv)):
@@ -340,13 +341,17 @@ class InvokeAiInstance:
new_argv.append(el)
new_argv.append(sys.argv[i + 1])
elif el in ["-y", "--yes", "--yes-to-all"]:
- new_argv.append(el)
+ auto_install = True
sys.argv = new_argv
+ import messages
import requests # to catch download exceptions
- from messages import introduction
- introduction()
+ auto_install = auto_install or messages.user_wants_auto_configuration()
+ if auto_install:
+ sys.argv.append("--yes")
+ else:
+ messages.introduction()
from invokeai.frontend.install.invokeai_configure import invokeai_configure
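
Net effect of the installer.py hunks: -y/--yes/--yes-to-all is consumed rather than forwarded, the user is prompted only when the flag is absent, and --yes is re-appended on the automatic path. A minimal sketch of that flow, with prompt_user() standing in for messages.user_wants_auto_configuration() and --root as an illustrative value-carrying option, not the installer's exact option list (the introduction() banner is omitted here):

import sys

def prompt_user() -> bool:
    # stand-in for messages.user_wants_auto_configuration()
    return input("automatic configuration? [a/m] (a): ").lower() in ("", "a")

auto_install = False
new_argv = [sys.argv[0]]
i = 1
while i < len(sys.argv):
    el = sys.argv[i]
    if el == "--root":                       # illustrative option that carries a value
        new_argv.append(el)
        new_argv.append(sys.argv[i + 1])
        i += 1                               # skip over the value
    elif el in ("-y", "--yes", "--yes-to-all"):
        auto_install = True                  # consume the flag rather than forwarding it
    i += 1
sys.argv = new_argv

if auto_install or prompt_user():
    sys.argv.append("--yes")                 # invokeai_configure sees --yes and skips the TUI
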
diff --git a/installer/lib/messages.py b/installer/lib/messages.py
index c5a39dc91c..e4c03bbfd2 100644
--- a/installer/lib/messages.py
+++ b/installer/lib/messages.py
@@ -7,7 +7,7 @@ import os
import platform
from pathlib import Path
-from prompt_toolkit import prompt
+from prompt_toolkit import HTML, prompt
from prompt_toolkit.completion import PathCompleter
from prompt_toolkit.validation import Validator
from rich import box, print
@@ -65,17 +65,50 @@ def confirm_install(dest: Path) -> bool:
if dest.exists():
print(f":exclamation: Directory {dest} already exists :exclamation:")
dest_confirmed = Confirm.ask(
- ":stop_sign: Are you sure you want to (re)install in this location?",
+ ":stop_sign: (re)install in this location?",
default=False,
)
else:
print(f"InvokeAI will be installed in {dest}")
- dest_confirmed = not Confirm.ask("Would you like to pick a different location?", default=False)
+ dest_confirmed = Confirm.ask("Use this location?", default=True)
console.line()
return dest_confirmed
+def user_wants_auto_configuration() -> bool:
+ """Prompt the user to choose between manual and auto configuration."""
+ console.rule("InvokeAI Configuration Section")
+ console.print(
+ Panel(
+ Group(
+ "\n".join(
+ [
+ "Libraries are installed and InvokeAI will now set up its root directory and configuration. Choose between:",
+ "",
+ " * AUTOMATIC configuration: install reasonable defaults and a minimal set of starter models.",
+ " * MANUAL configuration: manually inspect and adjust configuration options and pick from a larger set of starter models.",
+ "",
+ "Later you can fine-tune your configuration by selecting option [6] 'Change InvokeAI startup options' from the invoke.bat/invoke.sh launcher script.",
+ ]
+ ),
+ ),
+ box=box.MINIMAL,
+ padding=(1, 1),
+ )
+ )
+ choice = (
+ prompt(
+ HTML("Choose &lt;a&gt;utomatic or &lt;m&gt;anual configuration [a/m] (a): "),
+ validator=Validator.from_callable(
+ lambda n: n == "" or n.startswith(("a", "A", "m", "M")), error_message="Please select 'a' or 'm'"
+ ),
+ )
+ or "a"
+ )
+ return choice.lower().startswith("a")
+
+
def dest_path(dest=None) -> Path:
"""
Prompt the user for the destination path and create the path
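
The empty-string branch of the validator is what lets a bare Enter fall through to the "a" default. The acceptance rule can be exercised on its own; a small sketch using the same Validator.from_callable call (the test strings are illustrative):

from prompt_toolkit.document import Document
from prompt_toolkit.validation import ValidationError, Validator

accepts_a_or_m = Validator.from_callable(
    lambda n: n == "" or n.startswith(("a", "A", "m", "M")),
    error_message="Please select 'a' or 'm'",
)

for text in ["", "a", "Manual", "x"]:
    try:
        accepts_a_or_m.validate(Document(text))  # raises ValidationError on failure
        print(f"{text!r} accepted")
    except ValidationError:
        print(f"{text!r} rejected")  # only 'x' is rejected here
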
diff --git a/invokeai/app/services/config/invokeai_config.py b/invokeai/app/services/config/invokeai_config.py
index 51ccf45704..8ea703f39a 100644
--- a/invokeai/app/services/config/invokeai_config.py
+++ b/invokeai/app/services/config/invokeai_config.py
@@ -241,7 +241,7 @@ class InvokeAIAppConfig(InvokeAISettings):
version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other")
# CACHE
- ram : Union[float, Literal["auto"]] = Field(default=6.0, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number or 'auto')", category="Model Cache", )
+ ram : Union[float, Literal["auto"]] = Field(default=7.5, gt=0, description="Maximum memory amount used by model cache for rapid switching (floating point number or 'auto')", category="Model Cache", )
vram : Union[float, Literal["auto"]] = Field(default=0.25, ge=0, description="Amount of VRAM reserved for model storage (floating point number or 'auto')", category="Model Cache", )
lazy_offload : bool = Field(default=True, description="Keep models in VRAM until their space is needed", category="Model Cache", )
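
The ram field's type, Union[float, Literal["auto"]], means the raised 7.5 GB default can be overridden with either a number of gigabytes or the string "auto". A minimal pydantic sketch of that field shape (CacheSettings is an illustrative stand-in, not InvokeAI's config class):

from typing import Literal, Union

from pydantic import BaseModel, Field

class CacheSettings(BaseModel):
    # same shape as the ram field above, with the new default
    ram: Union[float, Literal["auto"]] = Field(default=7.5)

print(CacheSettings().ram)            # 7.5
print(CacheSettings(ram=4).ram)       # 4.0
print(CacheSettings(ram="auto").ram)  # 'auto'
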
diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py
index ec2221e12d..5afbdfb5a3 100755
--- a/invokeai/backend/install/invokeai_configure.py
+++ b/invokeai/backend/install/invokeai_configure.py
@@ -70,7 +70,6 @@ def get_literal_fields(field) -> list[Any]:
config = InvokeAIAppConfig.get_config()
Model_dir = "models"
-
Default_config_file = config.model_conf_path
SD_Configs = config.legacy_conf_path
@@ -458,7 +457,7 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
)
self.add_widget_intelligent(
npyscreen.TitleFixedText,
- name="Model RAM cache size (GB). Make this at least large enough to hold a single full model.",
+ name="Model RAM cache size (GB). Make this at least large enough to hold a single full model (2GB for SD-1, 6GB for SDXL).",
begin_entry_at=0,
editable=False,
color="CONTROL",
@@ -651,8 +650,19 @@ def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Nam
return editApp.new_opts()
+def default_ramcache() -> float:
+ """Choose a default RAM cache size using a heuristic based on installed RAM."""
+
+ # Note that on my 64 GB machine, psutil.virtual_memory().total gives 62 GB,
+ # so we adjust everything down a bit.
+ return (
+ 15.0 if MAX_RAM >= 60 else 7.5 if MAX_RAM >= 30 else 4 if MAX_RAM >= 14 else 2.1
+ ) # 2.1 is just large enough for SD 1.5 ;-)
+
+
def default_startup_options(init_file: Path) -> Namespace:
opts = InvokeAIAppConfig.get_config()
+ opts.ram = default_ramcache()
return opts