Clip RAM and VRAM to maximum system values

- Prevent a crash in `configure_invokeai` when user has manually set
  RAM or VRAM cache sizes to larger than the size available on system.

- Remove redundant imports from `invokeai.frontend.config.__init__.py`
  which were contributing to circular dependencies.
This commit is contained in:
Lincoln Stein 2023-08-09 14:46:41 -04:00
parent 2a38bfdc25
commit ac0c8d31bb
5 changed files with 17 additions and 12 deletions

View File

@ -21,7 +21,6 @@ from argparse import Namespace
from enum import Enum from enum import Enum
from pathlib import Path from pathlib import Path
from shutil import get_terminal_size from shutil import get_terminal_size
from typing import get_type_hints
from urllib import request from urllib import request
import npyscreen import npyscreen
@ -399,7 +398,7 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
self.max_cache_size = self.add_widget_intelligent( self.max_cache_size = self.add_widget_intelligent(
IntTitleSlider, IntTitleSlider,
name="RAM cache size (GB). Make this at least large enough to hold a single full model.", name="RAM cache size (GB). Make this at least large enough to hold a single full model.",
value=old_opts.max_cache_size, value=clip(old_opts.max_cache_size, range=(3.0, MAX_RAM)),
out_of=MAX_RAM, out_of=MAX_RAM,
lowest=3, lowest=3,
begin_entry_at=6, begin_entry_at=6,
@ -418,7 +417,7 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
self.nextrely -= 1 self.nextrely -= 1
self.max_vram_cache_size = self.add_widget_intelligent( self.max_vram_cache_size = self.add_widget_intelligent(
npyscreen.Slider, npyscreen.Slider,
value=old_opts.max_vram_cache_size, value=clip(old_opts.max_vram_cache_size, range=(0, MAX_VRAM)),
out_of=round(MAX_VRAM * 2) / 2, out_of=round(MAX_VRAM * 2) / 2,
lowest=0.0, lowest=0.0,
relx=8, relx=8,
@ -596,6 +595,16 @@ def default_user_selections(program_opts: Namespace) -> InstallSelections:
) )
# -------------------------------------
def clip(value: float, range: tuple[float, float]) -> float:
    """Clamp *value* into the inclusive interval given by *range*.

    *range* is a ``(minimum, maximum)`` pair; values below the minimum or
    above the maximum are pinned to the nearest bound. (The parameter name
    shadows the builtin ``range`` but is kept for keyword-call compatibility.)
    """
    low, high = range
    return max(low, min(value, high))
# ------------------------------------- # -------------------------------------
def initialize_rootdir(root: Path, yes_to_all: bool = False): def initialize_rootdir(root: Path, yes_to_all: bool = False):
logger.info("Initializing InvokeAI runtime directory") logger.info("Initializing InvokeAI runtime directory")

View File

@ -591,7 +591,6 @@ script, which will perform a full upgrade in place.""",
# TODO: revisit - don't rely on invokeai.yaml to exist yet! # TODO: revisit - don't rely on invokeai.yaml to exist yet!
dest_is_setup = (dest_root / "models/core").exists() and (dest_root / "databases").exists() dest_is_setup = (dest_root / "models/core").exists() and (dest_root / "databases").exists()
if not dest_is_setup: if not dest_is_setup:
import invokeai.frontend.install.invokeai_configure
from invokeai.backend.install.invokeai_configure import initialize_rootdir from invokeai.backend.install.invokeai_configure import initialize_rootdir
initialize_rootdir(dest_root, True) initialize_rootdir(dest_root, True)

View File

@ -1,6 +1,3 @@
""" """
Initialization file for invokeai.frontend.config Initialization file for invokeai.frontend.config
""" """
from .invokeai_configure import main as invokeai_configure
from .invokeai_update import main as invokeai_update
from .model_install import main as invokeai_model_install

View File

@ -1,4 +1,4 @@
""" """
Wrapper for invokeai.backend.configure.invokeai_configure Wrapper for invokeai.backend.configure.invokeai_configure
""" """
from ...backend.install.invokeai_configure import main from ...backend.install.invokeai_configure import main as invokeai_configure

View File

@ -118,7 +118,7 @@ dependencies = [
[project.scripts] [project.scripts]
# legacy entrypoints; provided for backwards compatibility # legacy entrypoints; provided for backwards compatibility
"configure_invokeai.py" = "invokeai.frontend.install:invokeai_configure" "configure_invokeai.py" = "invokeai.frontend.install.invokeai_configure:invokeai_configure"
"textual_inversion.py" = "invokeai.frontend.training:invokeai_textual_inversion" "textual_inversion.py" = "invokeai.frontend.training:invokeai_textual_inversion"
# shortcut commands to start cli and web # shortcut commands to start cli and web
@ -130,12 +130,12 @@ dependencies = [
"invokeai-web" = "invokeai.app.api_app:invoke_api" "invokeai-web" = "invokeai.app.api_app:invoke_api"
# full commands # full commands
"invokeai-configure" = "invokeai.frontend.install:invokeai_configure" "invokeai-configure" = "invokeai.frontend.install.invokeai_configure:invokeai_configure"
"invokeai-merge" = "invokeai.frontend.merge:invokeai_merge_diffusers" "invokeai-merge" = "invokeai.frontend.merge:invokeai_merge_diffusers"
"invokeai-ti" = "invokeai.frontend.training:invokeai_textual_inversion" "invokeai-ti" = "invokeai.frontend.training:invokeai_textual_inversion"
"invokeai-model-install" = "invokeai.frontend.install:invokeai_model_install" "invokeai-model-install" = "invokeai.frontend.install.model_install:main"
"invokeai-migrate3" = "invokeai.backend.install.migrate_to_3:main" "invokeai-migrate3" = "invokeai.backend.install.migrate_to_3:main"
"invokeai-update" = "invokeai.frontend.install:invokeai_update" "invokeai-update" = "invokeai.frontend.install.invokeai_update:main"
"invokeai-metadata" = "invokeai.frontend.CLI.sd_metadata:print_metadata" "invokeai-metadata" = "invokeai.frontend.CLI.sd_metadata:print_metadata"
"invokeai-node-cli" = "invokeai.app.cli_app:invoke_cli" "invokeai-node-cli" = "invokeai.app.cli_app:invoke_cli"
"invokeai-node-web" = "invokeai.app.api_app:invoke_api" "invokeai-node-web" = "invokeai.app.api_app:invoke_api"