Mirror of https://github.com/invoke-ai/InvokeAI

Commit 1e793a2dfe: Merge branch 'main' into 2.3.0rc4
installer/create_installer.sh (8 changed lines; mode changed from executable file to normal file)
@@ -14,12 +14,13 @@ fi
 VERSION=$(cd ..; python -c "from ldm.invoke import __version__ as version; print(version)")
 PATCH=""
 VERSION="v${VERSION}${PATCH}"
+LATEST_TAG="v2.3-latest"

 echo Building installer for version $VERSION
 echo "Be certain that you're in the 'installer' directory before continuing."
 read -p "Press any key to continue, or CTRL-C to exit..."

-read -e -p "Commit and tag this repo with ${VERSION} and 'v2.3-latest'? [n]: " input
+read -e -p "Commit and tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
 RESPONSE=${input:='n'}
 if [ "$RESPONSE" == 'y' ]; then
     git commit -a
@@ -28,8 +29,9 @@ if [ "$RESPONSE" == 'y' ]; then
         echo "Existing/invalid tag"
         exit -1
     fi
-    git push origin :refs/tags/v2.3-latest
-    git tag -fa v2.3-latest
+
+    git push origin :refs/tags/$LATEST_TAG
+    git tag -fa $LATEST_TAG
 fi

 # ----------------------
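Context for the hunk above: pushing an empty source to :refs/tags/$LATEST_TAG deletes the tag on the remote, and git tag -fa $LATEST_TAG force-recreates the annotated tag locally, so the moving "latest" tag follows each new release. A minimal sketch of the same two steps in Python, using subprocess purely for illustration (this helper is not part of the installer):

import subprocess

def repoint_latest_tag(latest_tag: str = "v2.3-latest") -> None:
    # Delete the tag on the remote by pushing an empty source ref to it.
    subprocess.run(["git", "push", "origin", f":refs/tags/{latest_tag}"], check=False)
    # Force-recreate the annotated tag at the current HEAD.
    subprocess.run(["git", "tag", "-fa", latest_tag, "-m", f"point {latest_tag} at HEAD"], check=True)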
@@ -339,7 +339,7 @@ class InvokeAiInstance:

         introduction()

-        from ldm.invoke.config import configure_invokeai
+        from ldm.invoke.config import invokeai_configure

         # NOTE: currently the config script does its own arg parsing! this means the command-line switches
         # from the installer will also automatically propagate down to the config script.
@@ -347,7 +347,7 @@ class InvokeAiInstance:

         # set sys.argv to a consistent state

-        configure_invokeai.main()
+        invokeai_configure.main()

     def install_user_scripts(self):
         """
@@ -1133,8 +1133,8 @@ def report_model_error(opt:Namespace, e:Exception):
        for arg in yes_to_all.split():
            sys.argv.append(arg)

-    from ldm.invoke.config import configure_invokeai
-    configure_invokeai.main()
+    from ldm.invoke.config import invokeai_configure
+    invokeai_configure.main()
     print('** InvokeAI will now restart')
     sys.argv = previous_args
     main()                            # would rather do a os.exec(), but doesn't exist?
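The hunk above sits inside a recovery path: the caller saves sys.argv, appends the switches the configure script expects, runs it, then restores the previous arguments and re-enters main(). A minimal sketch of that save/patch/restore pattern, with illustrative names rather than the project's own:

import sys

def run_with_temporary_argv(entry_point, extra_args):
    # Patch sys.argv so the called script parses the switches we want,
    # and always restore the caller's argv afterwards.
    previous_args = sys.argv
    sys.argv = [sys.argv[0], *extra_args]
    try:
        entry_point()
    finally:
        sys.argv = previous_args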
@@ -127,8 +127,8 @@ script do it for you. Manual installation is described at:

 https://invoke-ai.github.io/InvokeAI/installation/020_INSTALL_MANUAL/

-You may download the recommended models (about 10GB total), select a customized set, or
-completely skip this step.
+You may download the recommended models (about 15GB total), install all models (40 GB!!)
+select a customized set, or completely skip this step.
 """
     )
     completer.set_options(["recommended", "customized", "skip"])
@@ -435,9 +435,7 @@ def _download_diffusion_weights(
         )
     except OSError as e:
         if str(e).startswith("fp16 is not a valid"):
-            print(
-                f"Could not fetch half-precision version of model {repo_id}; fetching full-precision instead"
-            )
+            pass
         else:
             print(f"An unexpected error occurred while downloading the model: {e})")
     if path:
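This hunk and the ModelManager hunks below share one pattern: request the fp16 revision of a Hugging Face model first and fall back to full precision when that revision does not exist (the OSError whose message starts with "fp16 is not a valid"). A minimal sketch of the pattern using the public diffusers API; the pipeline class and repo id here are illustrative, not necessarily what InvokeAI uses internally:

import torch
from diffusers import StableDiffusionPipeline

def load_pipeline(repo_id: str):
    # Prefer half-precision weights; retry in full precision if the
    # repository has no fp16 revision.
    try:
        return StableDiffusionPipeline.from_pretrained(
            repo_id, revision="fp16", torch_dtype=torch.float16
        )
    except OSError as e:
        if str(e).startswith("fp16 is not a valid"):
            return StableDiffusionPipeline.from_pretrained(repo_id)
        raise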
@@ -868,7 +866,7 @@ def initialize_rootdir(root: str, yes_to_all: bool = False):
     ):
         os.makedirs(os.path.join(root, name), exist_ok=True)

-    configs_src = Path(configs.__path__[-1])
+    configs_src = Path(configs.__path__[0])
     configs_dest = Path(root) / "configs"
     if not os.path.samefile(configs_src, configs_dest):
         shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)
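On the __path__ change above: a package's __path__ is a list of directories, and for a regular (non-namespace) package it has exactly one entry, so [0] is the canonical location; [-1] can land on an unexpected copy when the package is resolvable from several directories. A small stand-alone illustration with a stdlib package standing in for the project's configs module:

from pathlib import Path
import email  # any regular installed package works for the illustration

# Regular packages have a single __path__ entry, so [0] is the canonical
# on-disk location of the package and its bundled files.
configs_src = Path(list(email.__path__)[0])
print(configs_src)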
@@ -484,12 +484,11 @@ class ModelManager(object):
                     **pipeline_args,
                     **fp_args,
                 )

             except OSError as e:
                 if str(e).startswith('fp16 is not a valid'):
-                    print(f'Could not fetch half-precision version of model {name_or_path}; fetching full-precision instead')
                     pass
                 else:
-                    print(f'An unexpected error occurred while downloading the model: {e})')
+                    print(f'** An unexpected error occurred while downloading the model: {e})')
             if pipeline:
                 break
@@ -1040,7 +1039,7 @@ class ModelManager(object):
             vae = AutoencoderKL.from_pretrained(name_or_path, **vae_args, **fp_args)
         except OSError as e:
             if str(e).startswith('fp16 is not a valid'):
-                print('   | Half-precision version of model not available; fetching full-precision instead')
+                pass
             else:
                 deferred_error = e
         if vae:
@@ -98,13 +98,13 @@ dependencies = [

 # legacy entrypoints; provided for backwards compatibility
 "invoke.py" = "ldm.invoke.CLI:main"
-"configure_invokeai.py" = "ldm.invoke.config.configure_invokeai:main"
+"configure_invokeai.py" = "ldm.invoke.config.invokeai_configure:main"
 "textual_inversion.py" = "ldm.invoke.training.textual_inversion:main"
 "merge_embeddings.py" = "ldm.invoke.merge_diffusers:main"

 # modern entrypoints
 "invokeai" = "ldm.invoke.CLI:main"
-"invokeai-configure" = "ldm.invoke.config.configure_invokeai:main"
+"invokeai-configure" = "ldm.invoke.config.invokeai_configure:main"
 "invokeai-merge" = "ldm.invoke.merge_diffusers:main" # note name munging
 "invokeai-ti" = "ldm.invoke.training.textual_inversion:main"

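Each entry above maps a console command (or legacy *.py shim) to a "module:function" target; the wrapper that pip generates simply imports the module and calls the function, which is why re-pointing the target at ldm.invoke.config.invokeai_configure is all that is needed. A minimal sketch of that resolution step, for illustration only (not InvokeAI code):

import importlib

def resolve_entry_point(target: str):
    # "ldm.invoke.config.invokeai_configure:main" -> the callable main
    module_name, _, attr = target.partition(":")
    module = importlib.import_module(module_name)
    return getattr(module, attr)

# Example: resolve_entry_point("ldm.invoke.config.invokeai_configure:main")()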
@@ -2,8 +2,8 @@
 # Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)

 import warnings
-from ldm.invoke.config import configure_invokeai
+from ldm.invoke.config import invokeai_configure

 if __name__ == '__main__':
-    warnings.warn("configire_invokeai.py is deprecated, please run 'invoke'", DeprecationWarning)
+    warnings.warn("configure_invokeai.py is deprecated, please run 'invokai-configure'", DeprecationWarning)
     configure_invokeai.main()