Merge branch 'main' into 2.3.0rc4

This commit is contained in:
Lincoln Stein 2023-02-05 10:24:09 -05:00 committed by GitHub
commit 1e793a2dfe
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 20 additions and 21 deletions

8
installer/create_installer.sh Executable file → Normal file
View File

@ -14,12 +14,13 @@ fi
VERSION=$(cd ..; python -c "from ldm.invoke import __version__ as version; print(version)") VERSION=$(cd ..; python -c "from ldm.invoke import __version__ as version; print(version)")
PATCH="" PATCH=""
VERSION="v${VERSION}${PATCH}" VERSION="v${VERSION}${PATCH}"
LATEST_TAG="v2.3-latest"
echo Building installer for version $VERSION echo Building installer for version $VERSION
echo "Be certain that you're in the 'installer' directory before continuing." echo "Be certain that you're in the 'installer' directory before continuing."
read -p "Press any key to continue, or CTRL-C to exit..." read -p "Press any key to continue, or CTRL-C to exit..."
read -e -p "Commit and tag this repo with ${VERSION} and 'v2.3-latest'? [n]: " input read -e -p "Commit and tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
RESPONSE=${input:='n'} RESPONSE=${input:='n'}
if [ "$RESPONSE" == 'y' ]; then if [ "$RESPONSE" == 'y' ]; then
git commit -a git commit -a
@ -28,8 +29,9 @@ if [ "$RESPONSE" == 'y' ]; then
echo "Existing/invalid tag" echo "Existing/invalid tag"
exit -1 exit -1
fi fi
git push origin :refs/tags/v2.3-latest
git tag -fa v2.3-latest git push origin :refs/tags/$LATEST_TAG
git tag -fa $LATEST_TAG
fi fi
# ---------------------- # ----------------------

View File

@ -339,7 +339,7 @@ class InvokeAiInstance:
introduction() introduction()
from ldm.invoke.config import configure_invokeai from ldm.invoke.config import invokeai_configure
# NOTE: currently the config script does its own arg parsing! this means the command-line switches # NOTE: currently the config script does its own arg parsing! this means the command-line switches
# from the installer will also automatically propagate down to the config script. # from the installer will also automatically propagate down to the config script.
@ -347,7 +347,7 @@ class InvokeAiInstance:
# set sys.argv to a consistent state # set sys.argv to a consistent state
configure_invokeai.main() invokeai_configure.main()
def install_user_scripts(self): def install_user_scripts(self):
""" """

View File

@ -1133,8 +1133,8 @@ def report_model_error(opt:Namespace, e:Exception):
for arg in yes_to_all.split(): for arg in yes_to_all.split():
sys.argv.append(arg) sys.argv.append(arg)
from ldm.invoke.config import configure_invokeai from ldm.invoke.config import invokeai_configure
configure_invokeai.main() invokeai_configure.main()
print('** InvokeAI will now restart') print('** InvokeAI will now restart')
sys.argv = previous_args sys.argv = previous_args
main() # would rather do a os.exec(), but doesn't exist? main() # would rather do a os.exec(), but doesn't exist?

View File

@ -127,8 +127,8 @@ script do it for you. Manual installation is described at:
https://invoke-ai.github.io/InvokeAI/installation/020_INSTALL_MANUAL/ https://invoke-ai.github.io/InvokeAI/installation/020_INSTALL_MANUAL/
You may download the recommended models (about 10GB total), select a customized set, or You may download the recommended models (about 15GB total), install all models (40 GB!!)
completely skip this step. select a customized set, or completely skip this step.
""" """
) )
completer.set_options(["recommended", "customized", "skip"]) completer.set_options(["recommended", "customized", "skip"])
@ -435,9 +435,7 @@ def _download_diffusion_weights(
) )
except OSError as e: except OSError as e:
if str(e).startswith("fp16 is not a valid"): if str(e).startswith("fp16 is not a valid"):
print( pass
f"Could not fetch half-precision version of model {repo_id}; fetching full-precision instead"
)
else: else:
print(f"An unexpected error occurred while downloading the model: {e})") print(f"An unexpected error occurred while downloading the model: {e})")
if path: if path:
@ -868,7 +866,7 @@ def initialize_rootdir(root: str, yes_to_all: bool = False):
): ):
os.makedirs(os.path.join(root, name), exist_ok=True) os.makedirs(os.path.join(root, name), exist_ok=True)
configs_src = Path(configs.__path__[-1]) configs_src = Path(configs.__path__[0])
configs_dest = Path(root) / "configs" configs_dest = Path(root) / "configs"
if not os.path.samefile(configs_src, configs_dest): if not os.path.samefile(configs_src, configs_dest):
shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True) shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)

View File

@ -484,12 +484,11 @@ class ModelManager(object):
**pipeline_args, **pipeline_args,
**fp_args, **fp_args,
) )
except OSError as e: except OSError as e:
if str(e).startswith('fp16 is not a valid'): if str(e).startswith('fp16 is not a valid'):
print(f'Could not fetch half-precision version of model {name_or_path}; fetching full-precision instead') pass
else: else:
print(f'An unexpected error occurred while downloading the model: {e})') print(f'** An unexpected error occurred while downloading the model: {e})')
if pipeline: if pipeline:
break break
@ -1040,7 +1039,7 @@ class ModelManager(object):
vae = AutoencoderKL.from_pretrained(name_or_path, **vae_args, **fp_args) vae = AutoencoderKL.from_pretrained(name_or_path, **vae_args, **fp_args)
except OSError as e: except OSError as e:
if str(e).startswith('fp16 is not a valid'): if str(e).startswith('fp16 is not a valid'):
print(' | Half-precision version of model not available; fetching full-precision instead') pass
else: else:
deferred_error = e deferred_error = e
if vae: if vae:

View File

@ -98,13 +98,13 @@ dependencies = [
# legacy entrypoints; provided for backwards compatibility # legacy entrypoints; provided for backwards compatibility
"invoke.py" = "ldm.invoke.CLI:main" "invoke.py" = "ldm.invoke.CLI:main"
"configure_invokeai.py" = "ldm.invoke.config.configure_invokeai:main" "configure_invokeai.py" = "ldm.invoke.config.invokeai_configure:main"
"textual_inversion.py" = "ldm.invoke.training.textual_inversion:main" "textual_inversion.py" = "ldm.invoke.training.textual_inversion:main"
"merge_embeddings.py" = "ldm.invoke.merge_diffusers:main" "merge_embeddings.py" = "ldm.invoke.merge_diffusers:main"
# modern entrypoints # modern entrypoints
"invokeai" = "ldm.invoke.CLI:main" "invokeai" = "ldm.invoke.CLI:main"
"invokeai-configure" = "ldm.invoke.config.configure_invokeai:main" "invokeai-configure" = "ldm.invoke.config.invokeai_configure:main"
"invokeai-merge" = "ldm.invoke.merge_diffusers:main" # note name munging "invokeai-merge" = "ldm.invoke.merge_diffusers:main" # note name munging
"invokeai-ti" = "ldm.invoke.training.textual_inversion:main" "invokeai-ti" = "ldm.invoke.training.textual_inversion:main"

View File

@ -2,8 +2,8 @@
# Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein) # Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)
import warnings import warnings
from ldm.invoke.config import configure_invokeai from ldm.invoke.config import invokeai_configure
if __name__ == '__main__': if __name__ == '__main__':
warnings.warn("configire_invokeai.py is deprecated, please run 'invoke'", DeprecationWarning) warnings.warn("configure_invokeai.py is deprecated, please run 'invokeai-configure'", DeprecationWarning)
configure_invokeai.main() invokeai_configure.main()