mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00

commit 5de42be4a6
parent 29ac252501

    reduce VRAM cache default; take max RAM from system
@@ -24,7 +24,7 @@ InvokeAI:
 sequential_guidance: false
 precision: float16
 max_cache_size: 6
-max_vram_cache_size: 2.7
+max_vram_cache_size: 0.5
 always_use_cpu: false
 free_gpu_mem: false
 Features:
@@ -173,8 +173,7 @@ from typing import ClassVar, Dict, List, Set, Literal, Union, get_origin, get_ty
 INIT_FILE = Path("invokeai.yaml")
 DB_FILE = Path("invokeai.db")
 LEGACY_INIT_FILE = Path("invokeai.init")
-DEFAULT_MAX_VRAM = 2.75
+DEFAULT_MAX_VRAM = 0.5
 
 
 class InvokeAISettings(BaseSettings):
     """
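The lowered DEFAULT_MAX_VRAM above is a module-level constant. A minimal sketch of how such a constant typically becomes the out-of-the-box value of a pydantic BaseSettings field; the import is pydantic v1-style, and the class and field wiring below are an illustration under that assumption, not the file's actual code (only the field name max_vram_cache_size comes from the hunks in this commit):

    from pydantic import BaseSettings  # pydantic v1 style; v2 moved this to pydantic-settings

    DEFAULT_MAX_VRAM = 0.5  # GB, the value set by this commit

    class ExampleSettings(BaseSettings):
        # the module-level default feeds the field, so fresh installs get 0.5 GB
        max_vram_cache_size: float = DEFAULT_MAX_VRAM

    print(ExampleSettings().max_vram_cache_size)  # 0.5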
@@ -10,6 +10,7 @@ import sys
 import argparse
 import io
 import os
+import psutil
 import shutil
 import textwrap
 import torch
@@ -79,10 +80,13 @@ Default_config_file = config.model_conf_path
 SD_Configs = config.legacy_conf_path
 
 PRECISION_CHOICES = ["auto", "float16", "float32"]
+GB = 1073741824  # GB in bytes
 HAS_CUDA = torch.cuda.is_available()
 _, MAX_VRAM = torch.cuda.mem_get_info() if HAS_CUDA else (0, 0)
-MAX_VRAM /= 1073741824  # GB in bytes
-MAX_VRAM_CACHE_RATIO = 0.55  # first guess of optimal vram cache based on total available
+MAX_VRAM /= GB
+MAX_RAM = psutil.virtual_memory().total / GB
 
 INIT_FILE_PREAMBLE = """# InvokeAI initialization file
 # This is the InvokeAI initialization file, which contains command-line default values.
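Taken on their own, the new module-level constants are easy to exercise. A standalone sketch using the same psutil and torch calls as the hunk above; the printed values depend on the machine and are not from the source:

    import psutil
    import torch

    GB = 1073741824  # bytes per GB, as in the diff

    HAS_CUDA = torch.cuda.is_available()
    # torch.cuda.mem_get_info() returns (free, total) in bytes; (0, 0) without CUDA
    _, MAX_VRAM = torch.cuda.mem_get_info() if HAS_CUDA else (0, 0)
    MAX_VRAM /= GB
    MAX_RAM = psutil.virtual_memory().total / GB  # total system RAM, now used to cap the RAM-cache slider

    print(f"MAX_VRAM={MAX_VRAM:.2f} GB, MAX_RAM={MAX_RAM:.2f} GB")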
@@ -391,9 +395,9 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
 )
 self.max_cache_size = self.add_widget_intelligent(
     IntTitleSlider,
-    name="RAM cache size. The larger this is, the more models can be kept in memory rather than loading from disk each time (GB)",
+    name="RAM cache size (GB). Make this at least large enough to hold a single model. Larger sizes will allow you to switch between models quickly without reading from disk.",
     value=old_opts.max_cache_size,
-    out_of=20,
+    out_of=MAX_RAM,
     lowest=3,
     begin_entry_at=6,
     scroll_exit=True,
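The practical effect of out_of=MAX_RAM is that the slider's ceiling now tracks the machine's installed RAM instead of a hardcoded 20 GB. A hedged sketch of that idea using stock npyscreen.TitleSlider; InvokeAI's IntTitleSlider is a custom widget not shown here, and this whole form is illustrative only:

    import npyscreen
    import psutil

    GB = 1073741824
    MAX_RAM = psutil.virtual_memory().total / GB

    class CacheForm(npyscreen.Form):
        def create(self):
            # slider ceiling derived from installed RAM rather than a fixed 20
            self.cache = self.add(
                npyscreen.TitleSlider,
                name="RAM cache size (GB)",
                value=6,
                lowest=3,
                out_of=int(MAX_RAM),
            )

    class CacheApp(npyscreen.NPSAppManaged):
        def onStart(self):
            self.addForm("MAIN", CacheForm, name="Cache settings")

    if __name__ == "__main__":
        CacheApp().run()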
@@ -402,7 +406,7 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
 self.nextrely += 1
 self.add_widget_intelligent(
     npyscreen.TitleFixedText,
-    name="VRAM cache size. Make this large enough to hold an entire model, but not more than half your available VRAM (GB)",
+    name="VRAM cache size (GB). Reserving a small amount of VRAM will modestly speed up the start of image generation.",
     begin_entry_at=0,
     editable=False,
     color="CONTROL",
@@ -416,7 +420,6 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
     lowest=0.0,
     relx=8,
     step=0.25,
-    begin_entry_at=MAX_VRAM * 0.55,
     scroll_exit=True,
 )
 else:
@@ -569,9 +572,6 @@ def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Nam
 
 def default_startup_options(init_file: Path) -> Namespace:
     opts = InvokeAIAppConfig.get_config()
-    # dynamically adust vram for memory size
-    if not init_file.exists():
-        opts.max_vram_cache_size = round((MAX_VRAM * MAX_VRAM_CACHE_RATIO) * 4) / 4
     return opts
 
 
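For reference, the heuristic deleted above sized the VRAM cache at MAX_VRAM_CACHE_RATIO (55%) of total VRAM, rounded to the nearest 0.25 GB. A standalone sketch of that arithmetic; the 8 GB card size is illustrative:

    MAX_VRAM_CACHE_RATIO = 0.55  # the removed "first guess" ratio

    def nearest_quarter_gb(gb: float) -> float:
        # round(x * 4) / 4 rounds to the nearest 0.25, as the removed line did
        return round(gb * 4) / 4

    print(nearest_quarter_gb(8.0 * MAX_VRAM_CACHE_RATIO))  # 4.5 on an 8 GB card
    # after this commit, fresh installs instead get the flat DEFAULT_MAX_VRAM = 0.5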