InvokeAI (mirror of https://github.com/invoke-ai/InvokeAI)

Commit 7d4ace962a: Merge branch 'main' into fix/detect-more-loras
@@ -24,11 +24,10 @@ InvokeAI:
     sequential_guidance: false
     precision: float16
     max_cache_size: 6
-    max_vram_cache_size: 2.7
+    max_vram_cache_size: 0.5
     always_use_cpu: false
     free_gpu_mem: false
   Features:
-    restore: true
     esrgan: true
     patchmatch: true
     internet_available: true
@@ -165,7 +164,7 @@ import pydoc
 import os
 import sys
 from argparse import ArgumentParser
-from omegaconf import OmegaConf, DictConfig
+from omegaconf import OmegaConf, DictConfig, ListConfig
 from pathlib import Path
 from pydantic import BaseSettings, Field, parse_obj_as
 from typing import ClassVar, Dict, List, Set, Literal, Union, get_origin, get_type_hints, get_args
@@ -173,6 +172,7 @@ from typing import ClassVar, Dict, List, Set, Literal, Union, get_origin, get_type_hints, get_args
 INIT_FILE = Path("invokeai.yaml")
 DB_FILE = Path("invokeai.db")
 LEGACY_INIT_FILE = Path("invokeai.init")
+DEFAULT_MAX_VRAM = 0.5


 class InvokeAISettings(BaseSettings):
@@ -189,7 +189,12 @@ class InvokeAISettings(BaseSettings):
         opt = parser.parse_args(argv)
         for name in self.__fields__:
             if name not in self._excluded():
-                setattr(self, name, getattr(opt, name))
+                value = getattr(opt, name)
+                if isinstance(value, ListConfig):
+                    value = list(value)
+                elif isinstance(value, DictConfig):
+                    value = dict(value)
+                setattr(self, name, value)

     def to_yaml(self) -> str:
         """
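Background for the hunk above: when settings come in from an OmegaConf init
file, getattr(opt, name) can return OmegaConf's container types (ListConfig /
DictConfig) rather than plain list / dict, so the values are normalized before
being assigned to the pydantic settings object. A minimal standalone sketch of
that conversion (illustrative only, not InvokeAI code):

    from omegaconf import OmegaConf, DictConfig, ListConfig

    conf = OmegaConf.create({"log_handlers": ["console", "file=/tmp/invokeai.log"]})
    value = conf.log_handlers
    print(type(value).__name__)  # ListConfig, not list

    # Normalize OmegaConf containers to plain Python types before handing
    # them to pydantic, which validates against list/dict annotations.
    if isinstance(value, ListConfig):
        value = list(value)
    elif isinstance(value, DictConfig):
        value = dict(value)

    print(type(value).__name__)  # list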
@@ -282,14 +287,10 @@ class InvokeAISettings(BaseSettings):
         return [
             "type",
             "initconf",
-            "gpu_mem_reserved",
-            "max_loaded_models",
             "version",
             "from_file",
             "model",
-            "restore",
             "root",
-            "nsfw_checker",
         ]

     class Config:
@@ -388,15 +389,11 @@ class InvokeAIAppConfig(InvokeAISettings):
     internet_available : bool = Field(default=True, description="If true, attempt to download models on the fly; otherwise only use local models", category='Features')
     log_tokenization : bool = Field(default=False, description="Enable logging of parsed prompt tokens.", category='Features')
     patchmatch : bool = Field(default=True, description="Enable/disable patchmatch inpaint code", category='Features')
-    restore : bool = Field(default=True, description="Enable/disable face restoration code (DEPRECATED)", category='DEPRECATED')

     always_use_cpu : bool = Field(default=False, description="If true, use the CPU for rendering even if a GPU is available.", category='Memory/Performance')
     free_gpu_mem : bool = Field(default=False, description="If true, purge model from GPU after each generation.", category='Memory/Performance')
-    max_loaded_models : int = Field(default=3, gt=0, description="(DEPRECATED: use max_cache_size) Maximum number of models to keep in memory for rapid switching", category='DEPRECATED')
     max_cache_size : float = Field(default=6.0, gt=0, description="Maximum memory amount used by model cache for rapid switching", category='Memory/Performance')
     max_vram_cache_size : float = Field(default=2.75, ge=0, description="Amount of VRAM reserved for model storage", category='Memory/Performance')
-    gpu_mem_reserved : float = Field(default=2.75, ge=0, description="DEPRECATED: use max_vram_cache_size. Amount of VRAM reserved for model storage", category='DEPRECATED')
-    nsfw_checker : bool = Field(default=True, description="DEPRECATED: use Web settings to enable/disable", category='DEPRECATED')
     precision : Literal[tuple(['auto','float16','float32','autocast'])] = Field(default='auto',description='Floating point precision', category='Memory/Performance')
     sequential_guidance : bool = Field(default=False, description="Whether to calculate guidance in serial instead of in parallel, lowering memory requirements", category='Memory/Performance')
     xformers_enabled : bool = Field(default=True, description="Enable/disable memory-efficient attention", category='Memory/Performance')
@@ -414,9 +411,7 @@ class InvokeAIAppConfig(InvokeAISettings):
     outdir : Path = Field(default='outputs', description='Default folder for output images', category='Paths')
     from_file : Path = Field(default=None, description='Take command input from the indicated file (command-line client only)', category='Paths')
     use_memory_db : bool = Field(default=False, description='Use in-memory database for storing image metadata', category='Paths')
-    ignore_missing_core_models : bool = Field(default=False, description='Ignore missing models in models/core/convert')
+    ignore_missing_core_models : bool = Field(default=False, description='Ignore missing models in models/core/convert', category='Features')

-    model : str = Field(default='stable-diffusion-1.5', description='Initial model name', category='Models')
-
     log_handlers : List[str] = Field(default=["console"], description='Log handler. Valid options are "console", "file=<path>", "syslog=path|address:host:port", "http=<url>"', category="Logging")
     # note - would be better to read the log_format values from logging.py, but this creates circular dependencies issues
@@ -426,6 +421,9 @@ class InvokeAIAppConfig(InvokeAISettings):
     version : bool = Field(default=False, description="Show InvokeAI version and exit", category="Other")
     # fmt: on

+    class Config:
+        validate_assignment = True
+
     def parse_args(self, argv: List[str] = None, conf: DictConfig = None, clobber=False):
         """
         Update settings with contents of init file, environment, and
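Note on the added Config block: with validate_assignment = True, pydantic v1
re-runs field validation on every attribute assignment, so the setattr calls
in parse_args above raise a ValidationError on incompatible values instead of
storing them silently. A minimal sketch of the behavior (illustrative only;
the Example class is hypothetical):

    from pydantic import BaseSettings, ValidationError

    class Example(BaseSettings):
        max_cache_size: float = 6.0

        class Config:
            validate_assignment = True

    e = Example()
    e.max_cache_size = "7.5"        # coerced to the float 7.5 on assignment
    try:
        e.max_cache_size = "a lot"  # cannot coerce to float, raises
    except ValidationError as err:
        print(err)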
@@ -10,12 +10,15 @@ import sys
 import argparse
 import io
 import os
+import psutil
 import shutil
 import textwrap
+import torch
 import traceback
 import yaml
 import warnings
 from argparse import Namespace
+from enum import Enum
 from pathlib import Path
 from shutil import get_terminal_size
 from typing import get_type_hints
@@ -44,6 +47,8 @@ from invokeai.app.services.config import (
 )
 from invokeai.backend.util.logging import InvokeAILogger
 from invokeai.frontend.install.model_install import addModelsForm, process_and_execute
+
+# TO DO - Move all the frontend code into invokeai.frontend.install
 from invokeai.frontend.install.widgets import (
     SingleSelectColumns,
     CenteredButtonPress,
@@ -53,6 +58,7 @@ from invokeai.frontend.install.widgets import (
     CyclingForm,
     MIN_COLS,
     MIN_LINES,
+    WindowTooSmallException,
 )
 from invokeai.backend.install.legacy_arg_parsing import legacy_parser
 from invokeai.backend.install.model_install_backend import (
@@ -61,6 +67,7 @@ from invokeai.backend.install.model_install_backend import (
     ModelInstall,
 )
 from invokeai.backend.model_management.model_probe import ModelType, BaseModelType
+from pydantic.error_wrappers import ValidationError

 warnings.filterwarnings("ignore")
 transformers.logging.set_verbosity_error()
@@ -76,6 +83,13 @@ Default_config_file = config.model_conf_path
 SD_Configs = config.legacy_conf_path

 PRECISION_CHOICES = ["auto", "float16", "float32"]
+GB = 1073741824  # GB in bytes
+HAS_CUDA = torch.cuda.is_available()
+_, MAX_VRAM = torch.cuda.mem_get_info() if HAS_CUDA else (0, 0)
+
+
+MAX_VRAM /= GB
+MAX_RAM = psutil.virtual_memory().total / GB

 INIT_FILE_PREAMBLE = """# InvokeAI initialization file
 # This is the InvokeAI initialization file, which contains command-line default values.
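The new module-level constants probe available memory once at import time:
torch.cuda.mem_get_info() returns a (free_bytes, total_bytes) pair for the
current CUDA device, and psutil.virtual_memory().total reports system RAM;
both are scaled to gigabytes. A standalone sketch of the same probe
(illustrative only, variable names are hypothetical):

    import psutil
    import torch

    GB = 2**30  # 1073741824 bytes

    has_cuda = torch.cuda.is_available()
    # mem_get_info() returns (free, total) in bytes; guard it so the probe
    # also works on CPU-only machines.
    _, max_vram = torch.cuda.mem_get_info() if has_cuda else (0, 0)

    max_vram /= GB
    max_ram = psutil.virtual_memory().total / GB
    print(f"RAM: {max_ram:.1f} GB, VRAM: {max_vram:.1f} GB")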
@@ -86,6 +100,12 @@ INIT_FILE_PREAMBLE = """# InvokeAI initialization file
 logger = InvokeAILogger.getLogger()


+class DummyWidgetValue(Enum):
+    zero = 0
+    true = True
+    false = False
+
+
 # --------------------------------------------
 def postscript(errors: None):
     if not any(errors):
@@ -378,13 +398,35 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
         )
         self.max_cache_size = self.add_widget_intelligent(
             IntTitleSlider,
-            name="Size of the RAM cache used for fast model switching (GB)",
+            name="RAM cache size (GB). Make this at least large enough to hold a single full model.",
             value=old_opts.max_cache_size,
-            out_of=20,
+            out_of=MAX_RAM,
             lowest=3,
             begin_entry_at=6,
             scroll_exit=True,
         )
+        if HAS_CUDA:
+            self.nextrely += 1
+            self.add_widget_intelligent(
+                npyscreen.TitleFixedText,
+                name="VRAM cache size (GB). Reserving a small amount of VRAM will modestly speed up the start of image generation.",
+                begin_entry_at=0,
+                editable=False,
+                color="CONTROL",
+                scroll_exit=True,
+            )
+            self.nextrely -= 1
+            self.max_vram_cache_size = self.add_widget_intelligent(
+                npyscreen.Slider,
+                value=old_opts.max_vram_cache_size,
+                out_of=round(MAX_VRAM * 2) / 2,
+                lowest=0.0,
+                relx=8,
+                step=0.25,
+                scroll_exit=True,
+            )
+        else:
+            self.max_vram_cache_size = DummyWidgetValue.zero
         self.nextrely += 1
         self.outdir = self.add_widget_intelligent(
             FileBox,
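In the slider added above, out_of=round(MAX_VRAM * 2) / 2 caps the VRAM cache
at the card's total VRAM snapped to the nearest half-gigabyte, which lines up
with the 0.25 GB step size. A quick illustration of that rounding (the helper
name and sample values are hypothetical):

    def snap_to_half_gb(vram_gb: float) -> float:
        # round(x * 2) / 2 snaps a value to the nearest 0.5
        return round(vram_gb * 2) / 2

    for vram in (7.79, 11.92, 24.0):
        print(vram, "->", snap_to_half_gb(vram))
    # 7.79 -> 8.0, 11.92 -> 12.0, 24.0 -> 24.0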
@@ -401,7 +443,7 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
         self.autoimport_dirs = {}
         self.autoimport_dirs["autoimport_dir"] = self.add_widget_intelligent(
             FileBox,
-            name=f"Folder to recursively scan for new checkpoints, ControlNets, LoRAs and TI models",
+            name="Folder to recursively scan for new checkpoints, ControlNets, LoRAs and TI models",
             value=str(config.root_path / config.autoimport_dir),
             select_dir=True,
             must_exist=False,
@@ -476,6 +518,7 @@ https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE
             "outdir",
             "free_gpu_mem",
             "max_cache_size",
+            "max_vram_cache_size",
             "xformers_enabled",
             "always_use_cpu",
         ]:
@@ -592,13 +635,13 @@ def maybe_create_models_yaml(root: Path):

 # -------------------------------------
 def run_console_ui(program_opts: Namespace, initfile: Path = None) -> (Namespace, Namespace):
-    # parse_args() will read from init file if present
     invokeai_opts = default_startup_options(initfile)
     invokeai_opts.root = program_opts.root

-    # The third argument is needed in the Windows 11 environment to
-    # launch a console window running this program.
-    set_min_terminal_size(MIN_COLS, MIN_LINES)
+    if not set_min_terminal_size(MIN_COLS, MIN_LINES):
+        raise WindowTooSmallException(
+            "Could not increase terminal size. Try running again with a larger window or smaller font size."
+        )

     # the install-models application spawns a subprocess to install
     # models, and will crash unless this is set before running.
@@ -654,10 +697,13 @@ def migrate_init_file(legacy_format: Path):
     old = legacy_parser.parse_args([f"@{str(legacy_format)}"])
     new = InvokeAIAppConfig.get_config()

-    fields = list(get_type_hints(InvokeAIAppConfig).keys())
+    fields = [x for x, y in InvokeAIAppConfig.__fields__.items() if y.field_info.extra.get("category") != "DEPRECATED"]
     for attr in fields:
         if hasattr(old, attr):
-            setattr(new, attr, getattr(old, attr))
+            try:
+                setattr(new, attr, getattr(old, attr))
+            except ValidationError as e:
+                print(f"* Ignoring incompatible value for field {attr}:\n {str(e)}")

     # a few places where the field names have changed and we have to
     # manually add in the new names/values
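The rewritten fields expression walks pydantic v1's __fields__ mapping and
drops anything whose Field() was declared with category='DEPRECATED', so
deprecated legacy options are no longer migrated. In pydantic v1, extra
keyword arguments to Field() land in field_info.extra. A minimal sketch of
the filter (illustrative only; ExampleConfig is hypothetical):

    from pydantic import BaseSettings, Field

    class ExampleConfig(BaseSettings):
        max_cache_size: float = Field(default=6.0, category="Memory/Performance")
        nsfw_checker: bool = Field(default=True, category="DEPRECATED")

    fields = [
        name
        for name, field in ExampleConfig.__fields__.items()
        if field.field_info.extra.get("category") != "DEPRECATED"
    ]
    print(fields)  # ['max_cache_size']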
@@ -777,6 +823,7 @@ def main():

     models_to_download = default_user_selections(opt)
     new_init_file = config.root_path / "invokeai.yaml"
+
     if opt.yes_to_all:
         write_default_options(opt, new_init_file)
         init_options = Namespace(precision="float32" if opt.full_precision else "float16")
@@ -802,6 +849,8 @@ def main():
         postscript(errors=errors)
         if not opt.yes_to_all:
             input("Press any key to continue...")
+    except WindowTooSmallException as e:
+        logger.error(str(e))
     except KeyboardInterrupt:
         print("\nGoodbye! Come back soon.")

@@ -595,8 +595,9 @@ class ModelManager(object):
         the combined format of the list_models() method.
         """
         models = self.list_models(base_model, model_type, model_name)
-        if len(models) > 1:
+        if len(models) >= 1:
             return models[0]
+        else:
             return None

     def list_models(
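The old guard len(models) > 1 only returned a result when the lookup matched
two or more entries, so the common case of exactly one match fell through to
return None; >= 1 fixes the off-by-one. An illustration (sample value is
hypothetical):

    models = ["stable-diffusion-1.5"]  # a lookup matching exactly one model

    # old condition: a unique match is wrongly treated as "not found"
    print(models[0] if len(models) > 1 else None)   # None
    # new condition: any non-empty result returns the first match
    print(models[0] if len(models) >= 1 else None)  # stable-diffusion-1.5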
@@ -990,7 +991,9 @@ class ModelManager(object):
                     raise DuplicateModelException(f"Model with key {model_key} added twice")

                 model_path = self.relative_model_path(model_path)
-                model_config: ModelConfigBase = model_class.probe_config(str(model_path))
+                model_config: ModelConfigBase = model_class.probe_config(
+                    str(model_path), model_base=cur_base_model
+                )
                 self.models[model_key] = model_config
                 new_models_found = True
             except DuplicateModelException as e:
@@ -80,8 +80,10 @@ class StableDiffusionXLModel(DiffusersModel):
             raise Exception("Unkown stable diffusion 2.* model format")

         if ckpt_config_path is None:
-            # TO DO: implement picking
-            pass
+            # avoid circular import
+            from .stable_diffusion import _select_ckpt_config
+
+            ckpt_config_path = _select_ckpt_config(kwargs.get("model_base", BaseModelType.StableDiffusionXL), variant)

         return cls.create_config(
             path=path,
@@ -28,7 +28,6 @@ from npyscreen import widget
 from invokeai.backend.util.logging import InvokeAILogger

 from invokeai.backend.install.model_install_backend import (
-    ModelInstallList,
     InstallSelections,
     ModelInstall,
     SchedulerPredictionType,
@@ -41,12 +40,12 @@ from invokeai.frontend.install.widgets import (
     SingleSelectColumns,
     TextBox,
     BufferBox,
-    FileBox,
     set_min_terminal_size,
     select_stable_diffusion_config_file,
     CyclingForm,
     MIN_COLS,
     MIN_LINES,
+    WindowTooSmallException,
 )
 from invokeai.app.services.config import InvokeAIAppConfig

@@ -156,7 +155,7 @@ class addModelsForm(CyclingForm, npyscreen.FormMultiPage):
             BufferBox,
             name="Log Messages",
             editable=False,
-            max_height=15,
+            max_height=6,
         )

         self.nextrely += 1
@@ -693,7 +692,11 @@ def select_and_download_models(opt: Namespace):
         # needed to support the probe() method running under a subprocess
         torch.multiprocessing.set_start_method("spawn")

-        set_min_terminal_size(MIN_COLS, MIN_LINES)
+        if not set_min_terminal_size(MIN_COLS, MIN_LINES):
+            raise WindowTooSmallException(
+                "Could not increase terminal size. Try running again with a larger window or smaller font size."
+            )
+
         installApp = AddModelApplication(opt)
         try:
             installApp.run()
@@ -787,6 +790,8 @@ def main():
         curses.echo()
         curses.endwin()
         logger.info("Goodbye! Come back soon.")
+    except WindowTooSmallException as e:
+        logger.error(str(e))
     except widget.NotEnoughSpaceForWidget as e:
         if str(e).startswith("Height of 1 allocated"):
             logger.error("Insufficient vertical space for the interface. Please make your window taller and try again")
@@ -21,13 +21,19 @@ MIN_COLS = 130
 MIN_LINES = 38


+class WindowTooSmallException(Exception):
+    pass
+
+
 # -------------------------------------
-def set_terminal_size(columns: int, lines: int):
+def set_terminal_size(columns: int, lines: int) -> bool:
+    OS = platform.uname().system
+    screen_ok = False
+    while not screen_ok:
         ts = get_terminal_size()
         width = max(columns, ts.columns)
         height = max(lines, ts.lines)

-    OS = platform.uname().system
         if OS == "Windows":
             pass
             # not working reliably - ask user to adjust the window
@@ -37,15 +43,18 @@ def set_terminal_size(columns: int, lines: int):

         # check whether it worked....
         ts = get_terminal_size()
-    pause = False
-    if ts.columns < columns:
-        print("\033[1mThis window is too narrow for the user interface.\033[0m")
-        pause = True
-    if ts.lines < lines:
-        print("\033[1mThis window is too short for the user interface.\033[0m")
-        pause = True
-    if pause:
-        input("Maximize the window then press any key to continue..")
+        if ts.columns < columns or ts.lines < lines:
+            print(
+                f"\033[1mThis window is too small for the interface. InvokeAI requires {columns}x{lines} (w x h) characters, but window is {ts.columns}x{ts.lines}\033[0m"
+            )
+            resp = input(
+                "Maximize the window and/or decrease the font size then press any key to continue. Type [Q] to give up.."
+            )
+            if resp.upper().startswith("Q"):
+                break
+        else:
+            screen_ok = True
+    return screen_ok


 def _set_terminal_size_powershell(width: int, height: int):
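set_terminal_size() now loops until the terminal reports the requested
geometry or the user gives up with Q, and returns a boolean instead of
nothing, which is what lets the callers above raise WindowTooSmallException.
A condensed sketch of the retry pattern in isolation (simplified and
hypothetical, not the actual implementation):

    from shutil import get_terminal_size

    def wait_for_resize(columns: int, lines: int) -> bool:
        # Keep prompting until the window is big enough or the user quits.
        while True:
            ts = get_terminal_size()
            if ts.columns >= columns and ts.lines >= lines:
                return True
            resp = input(
                f"\033[1mNeed {columns}x{lines}, window is {ts.columns}x{ts.lines}\033[0m "
                "Resize the window and press Enter, or type Q to give up: "
            )
            if resp.upper().startswith("Q"):
                return False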
@@ -80,14 +89,14 @@ def _set_terminal_size_unix(width: int, height: int):
     sys.stdout.flush()


-def set_min_terminal_size(min_cols: int, min_lines: int):
+def set_min_terminal_size(min_cols: int, min_lines: int) -> bool:
     # make sure there's enough room for the ui
     term_cols, term_lines = get_terminal_size()
     if term_cols >= min_cols and term_lines >= min_lines:
-        return
+        return True
     cols = max(term_cols, min_cols)
     lines = max(term_lines, min_lines)
-    set_terminal_size(cols, lines)
+    return set_terminal_size(cols, lines)


 class IntSlider(npyscreen.Slider):
@@ -164,7 +173,7 @@ class FloatSlider(npyscreen.Slider):


 class FloatTitleSlider(npyscreen.TitleText):
-    _entry_type = FloatSlider
+    _entry_type = npyscreen.Slider


 class SelectColumnBase:
invokeai/frontend/web/dist/assets (vendored build output; full diffs
suppressed because one or more lines are too long):
index-de589048.js (151 lines) is replaced by index-dd054634.js (151 lines,
new file), and the minified chunk that imports them now references
index-dd054634.js and menu-b42141e3.js instead of index-de589048.js and
menu-11348abc.js.

invokeai/frontend/web/dist/index.html (vendored):
@@ -12,7 +12,7 @@
       margin: 0;
     }
   </style>
-  <script type="module" crossorigin src="./assets/index-de589048.js"></script>
+  <script type="module" crossorigin src="./assets/index-dd054634.js"></script>
 </head>

 <body dir="ltr">
@@ -1 +1 @@
-__version__ = "3.0.2a1"
+__version__ = "3.0.2rc1"
@@ -77,7 +77,7 @@ dependencies = [
     "realesrgan",
     "requests~=2.28.2",
     "rich~=13.3",
-    "safetensors~=0.3.0",
+    "safetensors==0.3.1",
     "scikit-image~=0.21.0",
     "send2trash",
     "test-tube~=0.7.5",