add slider for VRAM cache

This commit is contained in:
Lincoln Stein 2023-08-02 09:11:24 -04:00
parent 4599575e65
commit 77c5c18542
2 changed files with 38 additions and 2 deletions

View File

@@ -12,10 +12,12 @@ import io
import os
import shutil
import textwrap
import torch
import traceback
import yaml
import warnings
from argparse import Namespace
from enum import Enum
from pathlib import Path
from shutil import get_terminal_size
from typing import get_type_hints
@@ -49,6 +51,7 @@ from invokeai.frontend.install.widgets import (
    CenteredButtonPress,
    FileBox,
    IntTitleSlider,
    FloatTitleSlider,
    set_min_terminal_size,
    CyclingForm,
    MIN_COLS,
@@ -76,6 +79,9 @@ Default_config_file = config.model_conf_path
SD_Configs = config.legacy_conf_path
PRECISION_CHOICES = ["auto", "float16", "float32"]
HAS_CUDA = torch.cuda.is_available()
_, MAX_VRAM = torch.cuda.mem_get_info() if HAS_CUDA else (0, 0)
MAX_VRAM /= 1073741824  # convert bytes to GB
INIT_FILE_PREAMBLE = """# InvokeAI initialization file
# This is the InvokeAI initialization file, which contains command-line default values.
@@ -86,6 +92,12 @@ INIT_FILE_PREAMBLE = """# InvokeAI initialization file
logger = InvokeAILogger.getLogger()
class DummyWidgetValue(Enum):
    """Sentinel values stored in place of a real npyscreen widget.

    Used when a widget is not created at all — e.g. ``max_vram_cache_size``
    is set to ``DummyWidgetValue.zero`` when no CUDA device is available —
    so later code can read a ``.value`` without a None check.
    """

    zero = 0
    true = True
    false = False
# --------------------------------------------
def postscript(errors: None):
    if not any(errors):
@@ -378,13 +390,36 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
        )
        self.max_cache_size = self.add_widget_intelligent(
            IntTitleSlider,
            name="RAM cache size. The larger this is, the more models can be kept in memory rather than loading from disk each time (GB)",
            value=old_opts.max_cache_size,
            out_of=20,
            lowest=3,
            begin_entry_at=6,
            scroll_exit=True,
        )
        if HAS_CUDA:
            self.nextrely += 1
            self.add_widget_intelligent(
                npyscreen.TitleFixedText,
                name="VRAM cache size. Make this large enough to hold an entire model, but not more than half your available VRAM (GB)",
                begin_entry_at=0,
                editable=False,
                color="CONTROL",
                scroll_exit=True,
            )
            self.nextrely -= 1
            self.max_vram_cache_size = self.add_widget_intelligent(
                npyscreen.Slider,
                value=old_opts.max_vram_cache_size,
                out_of=round(MAX_VRAM * 2) / 2,
                lowest=0.0,
                relx=8,
                step=0.25,
                begin_entry_at=MAX_VRAM * 0.55,
                scroll_exit=True,
            )
        else:
            self.max_vram_cache_size = DummyWidgetValue.zero
        self.nextrely += 1
        self.outdir = self.add_widget_intelligent(
            FileBox,
@@ -476,6 +511,7 @@ https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md
            "outdir",
            "free_gpu_mem",
            "max_cache_size",
            "max_vram_cache_size",
            "xformers_enabled",
            "always_use_cpu",
        ]:

View File

@@ -164,7 +164,7 @@ class FloatSlider(npyscreen.Slider):

class FloatTitleSlider(npyscreen.TitleText):
    _entry_type = npyscreen.Slider


class SelectColumnBase: