From 60213893ab1d80bd01981d47be059d8f6a2af9c7 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Sat, 4 Feb 2023 12:00:17 -0500
Subject: [PATCH] configuration script tidying up

- Rename configure_invokeai.py to invokeai_configure.py to be consistent
  with the installed script name
- Remove the warning message about half-precision models not being
  available during the model download process
- Adjust the estimated file size reported by configure
- Guesstimate the disk space needed for "all" models
- Fix up the "latest" tag to be named 'v2.3-latest'
---
 installer/create_installer.sh                         |  7 ++++---
 installer/installer.py                                |  4 ++--
 ldm/invoke/CLI.py                                     |  4 ++--
 .../{configure_invokeai.py => invokeai_configure.py}  | 10 ++++------
 ldm/invoke/model_manager.py                           |  7 +++----
 pyproject.toml                                        |  4 ++--
 scripts/configure_invokeai.py                         |  6 +++---
 7 files changed, 20 insertions(+), 22 deletions(-)
 rename ldm/invoke/config/{configure_invokeai.py => invokeai_configure.py} (99%)

diff --git a/installer/create_installer.sh b/installer/create_installer.sh
index e3b3e19113..e8bcee930d 100755
--- a/installer/create_installer.sh
+++ b/installer/create_installer.sh
@@ -14,12 +14,13 @@ fi
 VERSION=$(cd ..; python -c "from ldm.invoke import __version__ as version; print(version)")
 PATCH=""
 VERSION="v${VERSION}${PATCH}"
+LATEST_TAG="v2.3-latest"
 
 echo Building installer for version $VERSION
 echo "Be certain that you're in the 'installer' directory before continuing."
 read -p "Press any key to continue, or CTRL-C to exit..."
 
-read -e -p "Commit and tag this repo with ${VERSION} and 'v2.3-latest'? [n]: " input
+read -e -p "Commit and tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
 RESPONSE=${input:='n'}
 if [ "$RESPONSE" == 'y' ]; then
     git commit -a
@@ -28,8 +29,8 @@ if [ "$RESPONSE" == 'y' ]; then
         echo "Existing/invalid tag"
         exit -1
     fi
-    git push origin :refs/tags/v2.3-latest
-    git tag -fa latest
+    git push origin :refs/tags/$LATEST_TAG
+    git tag -fa $LATEST_TAG
 fi
 
 # ----------------------
diff --git a/installer/installer.py b/installer/installer.py
index dfdc21730e..efc1d727ab 100644
--- a/installer/installer.py
+++ b/installer/installer.py
@@ -339,7 +339,7 @@ class InvokeAiInstance:
 
         introduction()
 
-        from ldm.invoke.config import configure_invokeai
+        from ldm.invoke.config import invokeai_configure
 
         # NOTE: currently the config script does its own arg parsing! this means the command-line switches
         # from the installer will also automatically propagate down to the config script.
@@ -347,7 +347,7 @@ class InvokeAiInstance:
 
         # set sys.argv to a consistent state
 
-        configure_invokeai.main()
+        invokeai_configure.main()
 
     def install_user_scripts(self):
         """
diff --git a/ldm/invoke/CLI.py b/ldm/invoke/CLI.py
index ca4153f53d..aba8f6e1ce 100644
--- a/ldm/invoke/CLI.py
+++ b/ldm/invoke/CLI.py
@@ -1133,8 +1133,8 @@ def report_model_error(opt:Namespace, e:Exception):
     for arg in yes_to_all.split():
        sys.argv.append(arg)
 
-    from ldm.invoke.config import configure_invokeai
-    configure_invokeai.main()
+    from ldm.invoke.config import invokeai_configure
+    invokeai_configure.main()
     print('** InvokeAI will now restart')
     sys.argv = previous_args
     main() # would rather do a os.exec(), but doesn't exist?
diff --git a/ldm/invoke/config/configure_invokeai.py b/ldm/invoke/config/invokeai_configure.py
similarity index 99%
rename from ldm/invoke/config/configure_invokeai.py
rename to ldm/invoke/config/invokeai_configure.py
index 42639c5fc9..e04f457a62 100755
--- a/ldm/invoke/config/configure_invokeai.py
+++ b/ldm/invoke/config/invokeai_configure.py
@@ -127,8 +127,8 @@ script do it for you. Manual installation is described at:
 
 https://invoke-ai.github.io/InvokeAI/installation/020_INSTALL_MANUAL/
 
-You may download the recommended models (about 10GB total), select a customized set, or
-completely skip this step.
+You may download the recommended models (about 15GB total), install all models (40 GB!!),
+select a customized set, or completely skip this step.
 """
     )
     completer.set_options(["recommended", "customized", "skip"])
@@ -435,9 +435,7 @@ def _download_diffusion_weights(
         )
     except OSError as e:
         if str(e).startswith("fp16 is not a valid"):
-            print(
-                f"Could not fetch half-precision version of model {repo_id}; fetching full-precision instead"
-            )
+            pass
         else:
             print(f"An unexpected error occurred while downloading the model: {e})")
     if path:
@@ -868,7 +866,7 @@ def initialize_rootdir(root: str, yes_to_all: bool = False):
     ):
         os.makedirs(os.path.join(root, name), exist_ok=True)
 
-    configs_src = Path(configs.__path__[-1])
+    configs_src = Path(configs.__path__[0])
     configs_dest = Path(root) / "configs"
     if not os.path.samefile(configs_src, configs_dest):
         shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)
diff --git a/ldm/invoke/model_manager.py b/ldm/invoke/model_manager.py
index 466b2f25b8..b9421255a2 100644
--- a/ldm/invoke/model_manager.py
+++ b/ldm/invoke/model_manager.py
@@ -484,12 +484,11 @@ class ModelManager(object):
                     **pipeline_args,
                     **fp_args,
                 )
-
             except OSError as e:
                 if str(e).startswith('fp16 is not a valid'):
-                    print(f'Could not fetch half-precision version of model {name_or_path}; fetching full-precision instead')
+                    pass
                 else:
-                    print(f'An unexpected error occurred while downloading the model: {e})')
+                    print(f'** An unexpected error occurred while downloading the model: {e})')
             if pipeline:
                 break
 
@@ -1040,7 +1039,7 @@ class ModelManager(object):
             vae = AutoencoderKL.from_pretrained(name_or_path, **vae_args, **fp_args)
         except OSError as e:
             if str(e).startswith('fp16 is not a valid'):
-                print(' | Half-precision version of model not available; fetching full-precision instead')
+                pass
             else:
                 deferred_error = e
         if vae:
diff --git a/pyproject.toml b/pyproject.toml
index 02d8effc31..18df1e2e2f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -98,13 +98,13 @@ dependencies = [
 
 # legacy entrypoints; provided for backwards compatibility
 "invoke.py" = "ldm.invoke.CLI:main"
-"configure_invokeai.py" = "ldm.invoke.config.configure_invokeai:main"
+"configure_invokeai.py" = "ldm.invoke.config.invokeai_configure:main"
 "textual_inversion.py" = "ldm.invoke.training.textual_inversion:main"
 "merge_embeddings.py" = "ldm.invoke.merge_diffusers:main"
 
 # modern entrypoints
 "invokeai" = "ldm.invoke.CLI:main"
-"invokeai-configure" = "ldm.invoke.config.configure_invokeai:main"
+"invokeai-configure" = "ldm.invoke.config.invokeai_configure:main"
 "invokeai-merge" = "ldm.invoke.merge_diffusers:main" # note name munging
 "invokeai-ti" = "ldm.invoke.training.textual_inversion:main"
 
diff --git a/scripts/configure_invokeai.py b/scripts/configure_invokeai.py
index e22bec32fc..661ad124d9 100644
--- a/scripts/configure_invokeai.py
+++ b/scripts/configure_invokeai.py
@@ -2,8 +2,8 @@
 # Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)
 
 import warnings
-from ldm.invoke.config import configure_invokeai
+from ldm.invoke.config import invokeai_configure
 
 if __name__ == '__main__':
-    warnings.warn("configire_invokeai.py is deprecated, please run 'invoke'", DeprecationWarning)
-    configure_invokeai.main()
+    warnings.warn("configure_invokeai.py is deprecated, please run 'invokeai-configure'", DeprecationWarning)
+    invokeai_configure.main()