Mirror of https://github.com/invoke-ai/InvokeAI
Clip RAM and VRAM to maximum system values
- Prevent a crash in `configure_invokeai` when the user has manually set the RAM or VRAM cache size larger than the amount available on the system.
- Remove redundant imports from `invokeai.frontend.config.__init__.py` that were contributing to circular dependencies.
Parent: 2a38bfdc25
Commit: ac0c8d31bb
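
The hunks below clamp the cache sliders against MAX_RAM and MAX_VRAM constants whose definitions are not part of this diff. As a rough sketch of how such system maxima are typically detected (the psutil/torch calls here are an assumption for illustration, not code from this commit):

```python
# Illustrative sketch only -- the real MAX_RAM / MAX_VRAM definitions are not
# shown in this diff; psutil/torch are assumed here purely for illustration.
import psutil
import torch

MAX_RAM = psutil.virtual_memory().total / 2**30  # total physical RAM, in GB

if torch.cuda.is_available():
    # total memory of the first CUDA device, in GB
    MAX_VRAM = torch.cuda.get_device_properties(0).total_memory / 2**30
else:
    MAX_VRAM = 0.0
```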
invokeai/backend/install/invokeai_configure.py

@@ -21,7 +21,6 @@ from argparse import Namespace
from enum import Enum
from pathlib import Path
from shutil import get_terminal_size
from typing import get_type_hints
from urllib import request

import npyscreen
@@ -399,7 +398,7 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
self.max_cache_size = self.add_widget_intelligent(
    IntTitleSlider,
    name="RAM cache size (GB). Make this at least large enough to hold a single full model.",
-   value=old_opts.max_cache_size,
+   value=clip(old_opts.max_cache_size, range=(3.0, MAX_RAM)),
    out_of=MAX_RAM,
    lowest=3,
    begin_entry_at=6,
@@ -418,7 +417,7 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
self.nextrely -= 1
self.max_vram_cache_size = self.add_widget_intelligent(
    npyscreen.Slider,
-   value=old_opts.max_vram_cache_size,
+   value=clip(old_opts.max_vram_cache_size, range=(0, MAX_VRAM)),
    out_of=round(MAX_VRAM * 2) / 2,
    lowest=0.0,
    relx=8,
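
A side note on the `out_of` bound in this hunk: `round(MAX_VRAM * 2) / 2` snaps the slider's maximum to the nearest 0.5 GB. For example (made-up value):

```python
# round(x * 2) / 2 rounds to the nearest 0.5, so the VRAM slider's upper
# bound lands on a half-gigabyte boundary.
MAX_VRAM = 7.92                  # made-up figure for an 8 GB card
print(round(MAX_VRAM * 2) / 2)   # 8.0
```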
@@ -596,6 +595,16 @@ def default_user_selections(program_opts: Namespace) -> InstallSelections:
    )


+# -------------------------------------
+def clip(value: float, range: tuple[float, float]) -> float:
+    minimum, maximum = range
+    if value < minimum:
+        value = minimum
+    if value > maximum:
+        value = maximum
+    return value
+
+
# -------------------------------------
def initialize_rootdir(root: Path, yes_to_all: bool = False):
    logger.info("Initializing InvokeAI runtime directory")
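
A quick illustration of how the new `clip` helper behaves when a stored cache setting exceeds what the machine actually has (MAX_RAM and MAX_VRAM below are made-up example values; the helper itself is exactly as added above):

```python
# Assumes the clip() helper added in the hunk above is in scope.
MAX_RAM, MAX_VRAM = 16.0, 8.0  # example maxima only

clip(32.0, range=(3.0, MAX_RAM))   # -> 16.0, oversized RAM cache clamped down
clip(1.0, range=(3.0, MAX_RAM))    # -> 3.0, below the floor, clamped up
clip(4.0, range=(0, MAX_VRAM))     # -> 4.0, already in range, unchanged
```

The same clamp could be written as `min(max(value, minimum), maximum)`; the explicit form keeps the bounds easy to read in the installer code.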
@@ -591,7 +591,6 @@ script, which will perform a full upgrade in place.""",
    # TODO: revisit - don't rely on invokeai.yaml to exist yet!
    dest_is_setup = (dest_root / "models/core").exists() and (dest_root / "databases").exists()
    if not dest_is_setup:
        import invokeai.frontend.install.invokeai_configure
        from invokeai.backend.install.invokeai_configure import initialize_rootdir

        initialize_rootdir(dest_root, True)
invokeai/frontend/config/__init__.py

@@ -1,6 +1,3 @@
"""
Initialization file for invokeai.frontend.config
"""
-from .invokeai_configure import main as invokeai_configure
-from .invokeai_update import main as invokeai_update
-from .model_install import main as invokeai_model_install
invokeai/frontend/install/invokeai_configure.py

@@ -1,4 +1,4 @@
"""
Wrapper for invokeai.backend.configure.invokeai_configure
"""
-from ...backend.install.invokeai_configure import main
+from ...backend.install.invokeai_configure import main as invokeai_configure
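
This aliased re-export is what the updated `pyproject.toml` entry points below rely on. A minimal check of what the wrapper module now exposes (hypothetical snippet, not part of the commit):

```python
# Hypothetical check: after this change the wrapper module re-exports the
# backend entry function under the name the console scripts expect.
from invokeai.frontend.install.invokeai_configure import invokeai_configure

# invokeai_configure()  # would launch the same TUI as the old target
```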
pyproject.toml

@@ -118,7 +118,7 @@ dependencies = [
[project.scripts]

# legacy entrypoints; provided for backwards compatibility
-"configure_invokeai.py" = "invokeai.frontend.install:invokeai_configure"
+"configure_invokeai.py" = "invokeai.frontend.install.invokeai_configure:invokeai_configure"
"textual_inversion.py" = "invokeai.frontend.training:invokeai_textual_inversion"

# shortcut commands to start cli and web
@@ -130,12 +130,12 @@ dependencies = [
"invokeai-web" = "invokeai.app.api_app:invoke_api"

# full commands
-"invokeai-configure" = "invokeai.frontend.install:invokeai_configure"
+"invokeai-configure" = "invokeai.frontend.install.invokeai_configure:invokeai_configure"
"invokeai-merge" = "invokeai.frontend.merge:invokeai_merge_diffusers"
"invokeai-ti" = "invokeai.frontend.training:invokeai_textual_inversion"
-"invokeai-model-install" = "invokeai.frontend.install:invokeai_model_install"
+"invokeai-model-install" = "invokeai.frontend.install.model_install:main"
"invokeai-migrate3" = "invokeai.backend.install.migrate_to_3:main"
-"invokeai-update" = "invokeai.frontend.install:invokeai_update"
+"invokeai-update" = "invokeai.frontend.install.invokeai_update:main"
"invokeai-metadata" = "invokeai.frontend.CLI.sd_metadata:print_metadata"
"invokeai-node-cli" = "invokeai.app.cli_app:invoke_cli"
"invokeai-node-web" = "invokeai.app.api_app:invoke_api"
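
For context on why the longer entry-point strings work: a console-script target of the form `"pkg.module:func"` is resolved by importing the module and looking up the named attribute, roughly as in this generic sketch (illustration of the mechanism, not InvokeAI code):

```python
# Rough sketch of how a "pkg.module:func" console-script target resolves.
import importlib

def resolve_entry_point(target: str):
    module_path, _, attr = target.partition(":")
    module = importlib.import_module(module_path)
    return getattr(module, attr)

# The old targets required the frontend package __init__ to re-export these
# names; the new targets point at the defining modules directly.
main = resolve_entry_point("invokeai.frontend.install.invokeai_configure:invokeai_configure")
# main()  # would launch the configure TUI
```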