diff --git a/invokeai/app/services/config.py b/invokeai/app/services/config.py
index 264a93955c..1509176478 100644
--- a/invokeai/app/services/config.py
+++ b/invokeai/app/services/config.py
@@ -175,6 +175,7 @@
 DB_FILE = Path("invokeai.db")
 LEGACY_INIT_FILE = Path("invokeai.init")
 DEFAULT_MAX_VRAM = 2.75
+
 class InvokeAISettings(BaseSettings):
     """
     Runtime configuration settings in which default values are
diff --git a/invokeai/backend/install/invokeai_configure.py b/invokeai/backend/install/invokeai_configure.py
index 2e95993d75..6cf1101b4c 100755
--- a/invokeai/backend/install/invokeai_configure.py
+++ b/invokeai/backend/install/invokeai_configure.py
@@ -82,7 +82,7 @@
 PRECISION_CHOICES = ["auto", "float16", "float32"]
 HAS_CUDA = torch.cuda.is_available()
 _, MAX_VRAM = torch.cuda.mem_get_info() if HAS_CUDA else (0, 0)
 MAX_VRAM /= 1073741824  # GB in bytes
-MAX_VRAM_CACHE_RATIO = 0.55 # first guess of optimal vram cache based on total available
+MAX_VRAM_CACHE_RATIO = 0.55  # first guess of optimal vram cache based on total available
 INIT_FILE_PREAMBLE = """# InvokeAI initialization file
 # This is the InvokeAI initialization file, which contains command-line default values.
@@ -571,9 +571,10 @@ def default_startup_options(init_file: Path) -> Namespace:
     opts = InvokeAIAppConfig.get_config()
     # dynamically adust vram for memory size
     if not init_file.exists():
-        opts.max_vram_cache_size = round((MAX_VRAM * MAX_VRAM_CACHE_RATIO)*4) / 4
+        opts.max_vram_cache_size = round((MAX_VRAM * MAX_VRAM_CACHE_RATIO) * 4) / 4
     return opts
 
+
 def default_user_selections(program_opts: Namespace) -> InstallSelections:
     try:
         installer = ModelInstall(config)
@@ -713,6 +714,7 @@ def migrate_init_file(legacy_format: Path):
 # -------------------------------------
 def migrate_models(root: Path):
     from invokeai.backend.install.migrate_to_3 import do_migrate
+
     do_migrate(root, root)
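
Reviewer note: every hunk above is formatting only (an added blank line, spaces around operators, double-spaced inline comments); no behavior changes. Since the reformatted line in default_startup_options is the one of interest, below is a minimal standalone sketch of what it computes, assuming the same module-level constants from invokeai_configure.py; the 8 GB figure in the comment is just an illustrative example, not part of the patch.

    import torch

    # Mirror the constants from invokeai_configure.py.
    # torch.cuda.mem_get_info() returns (free_bytes, total_bytes).
    HAS_CUDA = torch.cuda.is_available()
    _, MAX_VRAM = torch.cuda.mem_get_info() if HAS_CUDA else (0, 0)
    MAX_VRAM /= 1073741824  # convert bytes to GB
    MAX_VRAM_CACHE_RATIO = 0.55  # first guess of optimal vram cache

    # round(x * 4) / 4 snaps the default to the nearest quarter GB,
    # e.g. a hypothetical 8 GB card: 8 * 0.55 = 4.4 -> round(17.6) / 4 = 4.5 GB.
    max_vram_cache_size = round((MAX_VRAM * MAX_VRAM_CACHE_RATIO) * 4) / 4
    print(f"default max_vram_cache_size: {max_vram_cache_size} GB")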