mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00

fix invokeai-configure to use isolated argument-parsing pattern

parent d871fca643
commit 5d16a40b95
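
This commit moves to an isolated argument-parsing pattern: the console entry point parses sys.argv first, records the result on InvokeAIArgs, and only then imports the backend configure module, so code that reads the global configuration (for example get_config()) already sees the parsed arguments. Below is a condensed sketch of that flow, with the argument list trimmed to one flag for brevity; entry_point is a stand-in name for the real run_configure defined in the diff further down.

import argparse


def entry_point() -> None:
    # Parse CLI args before importing anything that reads configuration.
    from invokeai.frontend.cli.arg_parser import InvokeAIArgs

    parser = argparse.ArgumentParser(description="InvokeAI model downloader")
    parser.add_argument(
        "--full-precision", dest="full_precision", action=argparse.BooleanOptionalAction, default=False
    )
    opt = parser.parse_args()
    InvokeAIArgs.args = opt  # make the parsed args visible to get_config()

    # Deferred import: the backend module is only loaded once the args exist.
    from invokeai.backend.install.invokeai_configure import main

    main(opt)
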
@@ -6,7 +6,6 @@
 #
 # Coauthor: Kevin Turner http://github.com/keturn
 #
-import argparse
 import io
 import os
 import shutil
@@ -38,7 +37,6 @@ from invokeai.backend.install.install_helper import InstallHelper, InstallSelect
 from invokeai.backend.model_manager import ModelType
 from invokeai.backend.util import choose_precision, choose_torch_device
 from invokeai.backend.util.logging import InvokeAILogger
-from invokeai.frontend.cli.arg_parser import InvokeAIArgs
 from invokeai.frontend.install.model_install import addModelsForm

 # TO DO - Move all the frontend code into invokeai.frontend.install
@@ -742,57 +740,12 @@ def is_v2_install(root: Path) -> bool:


 # -------------------------------------
-def main() -> None:
+def main(opt: Namespace) -> None:
     global FORCE_FULL_PRECISION  # FIXME
     global config

-    parser = argparse.ArgumentParser(description="InvokeAI model downloader")
-    parser.add_argument(
-        "--skip-sd-weights",
-        dest="skip_sd_weights",
-        action=argparse.BooleanOptionalAction,
-        default=False,
-        help="skip downloading the large Stable Diffusion weight files",
-    )
-    parser.add_argument(
-        "--skip-support-models",
-        dest="skip_support_models",
-        action=argparse.BooleanOptionalAction,
-        default=False,
-        help="skip downloading the support models",
-    )
-    parser.add_argument(
-        "--full-precision",
-        dest="full_precision",
-        action=argparse.BooleanOptionalAction,
-        type=bool,
-        default=False,
-        help="use 32-bit weights instead of faster 16-bit weights",
-    )
-    parser.add_argument(
-        "--yes",
-        "-y",
-        dest="yes_to_all",
-        action="store_true",
-        help='answer "yes" to all prompts',
-    )
-    parser.add_argument(
-        "--default_only",
-        action="store_true",
-        help="when --yes specified, only install the default model",
-    )
-    parser.add_argument(
-        "--root_dir",
-        dest="root",
-        type=str,
-        default=None,
-        help="path to root of install directory",
-    )
-
-    opt = parser.parse_args()
     updates: dict[str, Any] = {}

-    InvokeAIArgs.args = opt
     config = get_config()
     if opt.full_precision:
         updates["precision"] = "float32"
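
After this hunk, main() no longer builds an argparse parser of its own; it expects an already-parsed argparse.Namespace from its caller, and the option names are the dest values that were previously declared here. A hypothetical direct call under the new signature is sketched below; the real caller is the run_configure wrapper added further down, which also sets InvokeAIArgs.args first, and the values shown are purely illustrative.

from argparse import Namespace

from invokeai.backend.install.invokeai_configure import main

# Field names taken from the parser definitions removed above; values are illustrative.
opt = Namespace(
    skip_sd_weights=False,
    skip_support_models=False,
    full_precision=False,
    yes_to_all=True,
    default_only=True,
    root=None,
)
main(opt)
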
@@ -2,6 +2,59 @@
 Wrapper for invokeai.backend.configure.invokeai_configure
 """

-from ...backend.install.invokeai_configure import main as invokeai_configure  # noqa: F401
+import argparse

-__all__ = ["invokeai_configure"]
+
+def run_configure() -> None:
+    # Before doing _anything_, parse CLI args!
+    from invokeai.frontend.cli.arg_parser import InvokeAIArgs
+
+    parser = argparse.ArgumentParser(description="InvokeAI model downloader")
+    parser.add_argument(
+        "--skip-sd-weights",
+        dest="skip_sd_weights",
+        action=argparse.BooleanOptionalAction,
+        default=False,
+        help="skip downloading the large Stable Diffusion weight files",
+    )
+    parser.add_argument(
+        "--skip-support-models",
+        dest="skip_support_models",
+        action=argparse.BooleanOptionalAction,
+        default=False,
+        help="skip downloading the support models",
+    )
+    parser.add_argument(
+        "--full-precision",
+        dest="full_precision",
+        action=argparse.BooleanOptionalAction,
+        type=bool,
+        default=False,
+        help="use 32-bit weights instead of faster 16-bit weights",
+    )
+    parser.add_argument(
+        "--yes",
+        "-y",
+        dest="yes_to_all",
+        action="store_true",
+        help='answer "yes" to all prompts',
+    )
+    parser.add_argument(
+        "--default_only",
+        action="store_true",
+        help="when --yes specified, only install the default model",
+    )
+    parser.add_argument(
+        "--root_dir",
+        dest="root",
+        type=str,
+        default=None,
+        help="path to root of install directory",
+    )
+
+    opt = parser.parse_args()
+    InvokeAIArgs.args = opt
+
+    from invokeai.backend.install.invokeai_configure import main as invokeai_configure
+
+    invokeai_configure(opt)
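
Note the deferred import at the bottom of run_configure(): the backend module is only imported after InvokeAIArgs.args has been set, which appears to be the point of the isolated-parsing pattern named in the commit message. Since run_configure() reads sys.argv itself, it can also be exercised programmatically by faking argv first; the snippet below is purely illustrative.

import sys

from invokeai.frontend.install.invokeai_configure import run_configure

# Illustrative only: pretend the user ran `invokeai-configure --yes --default_only --skip-sd-weights`.
sys.argv = ["invokeai-configure", "--yes", "--default_only", "--skip-sd-weights"]
run_configure()
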
@@ -127,7 +127,7 @@ dependencies = [
 [project.scripts]

 # legacy entrypoints; provided for backwards compatibility
-"configure_invokeai.py" = "invokeai.frontend.install.invokeai_configure:invokeai_configure"
+"configure_invokeai.py" = "invokeai.frontend.install.invokeai_configure:run_configure"
 "textual_inversion.py" = "invokeai.frontend.training:invokeai_textual_inversion"

 # shortcut commands to start web ui
@@ -138,7 +138,7 @@ dependencies = [
 "invokeai-web" = "invokeai.app.run_app:run_app"

 # full commands
-"invokeai-configure" = "invokeai.frontend.install.invokeai_configure:invokeai_configure"
+"invokeai-configure" = "invokeai.frontend.install.invokeai_configure:run_configure"
 "invokeai-merge" = "invokeai.frontend.merge.merge_diffusers:main"
 "invokeai-ti" = "invokeai.frontend.training:invokeai_textual_inversion"
 "invokeai-model-install" = "invokeai.frontend.install.model_install:main"
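
Entries under [project.scripts] are installed as console scripts that import the referenced attribute and call it with no arguments, which is why run_configure() takes no parameters and does its own sys.argv parsing. The generated invokeai-configure launcher is roughly equivalent to the following; the exact wrapper written by the installer varies by tool and platform.

import sys

from invokeai.frontend.install.invokeai_configure import run_configure

if __name__ == "__main__":
    sys.exit(run_configure())
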
@@ -3,7 +3,7 @@

 import warnings

-from invokeai.frontend.install.invokeai_configure import invokeai_configure as configure
+from invokeai.frontend.install.invokeai_configure import run_configure as configure

 if __name__ == "__main__":
     warnings.warn(
@@ -1,4 +1,3 @@
-import sys
 from pathlib import Path
 from tempfile import TemporaryDirectory
 from typing import Any
@@ -8,7 +7,6 @@ from omegaconf import OmegaConf
 from pydantic import ValidationError

 from invokeai.app.services.config.config_default import InvokeAIAppConfig, get_config, load_and_migrate_config
-from invokeai.frontend.cli.arg_parser import InvokeAIArgs

 v4_config = """
 schema_version: 4
@@ -78,13 +76,6 @@ def test_read_config_from_file(tmp_path: Path):
     assert config.port == 8080


-def test_arg_parsing():
-    sys.argv = ["test_config.py", "--root", "/tmp"]
-    InvokeAIArgs.parse_args()
-    config = get_config()
-    assert config.root_path == Path("/tmp")
-
-
 def test_migrate_v3_config_from_file(tmp_path: Path):
     """Test reading configuration from a file."""
     temp_config_file = tmp_path / "temp_invokeai.yaml"