Compare commits

...

4 Commits

SHA1 Message Date
0bbc83fe34 Bump up version to v2.3.0rc3
- Also changed the "latest" tag to "latest-2.3" to avoid interfering
  with the 2.2.5 update script, which fetches "latest"
2023-02-04 13:34:11 -05:00
841e92e02a guesstimated disk space needed for "all" models 2023-02-04 13:32:34 -05:00
4fbcdf2e28 adjust estimated file size reported by configure 2023-02-04 13:18:28 -05:00
f744cf06e3 configuration script tidying up
- Rename configure_invokeai.py to invokeai_configure.py to be
  consistent with installed script name
- Remove warning message about half-precision models not being
  available during the model download process.
2023-02-04 12:00:17 -05:00
8 changed files with 19 additions and 22 deletions

View File

@@ -19,7 +19,7 @@ echo Building installer for version $VERSION
 echo "Be certain that you're in the 'installer' directory before continuing."
 read -p "Press any key to continue, or CTRL-C to exit..."
-read -e -p "Commit and tag this repo with ${VERSION} and 'latest'? [n]: " input
+read -e -p "Commit and tag this repo with ${VERSION} and 'latest-2.3'? [n]: " input
 RESPONSE=${input:='n'}
 if [ "$RESPONSE" == 'y' ]; then
     git commit -a
@@ -28,8 +28,8 @@ if [ "$RESPONSE" == 'y' ]; then
         echo "Existing/invalid tag"
         exit -1
     fi
-    git push origin :refs/tags/latest
-    git tag -fa latest
+    git push origin :refs/tags/latest-2.3
+    git tag -fa latest-2.3
 fi
 # ----------------------
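The two tag commands in the hunk above are the interesting part: pushing an empty source to ':refs/tags/latest-2.3' deletes that tag on the remote if it exists, and 'git tag -fa latest-2.3' force-recreates it locally as an annotated tag pointing at the release commit, so the "latest-2.3" tag can be moved from release to release while the plain "latest" tag (still consumed by the 2.2.5 update script) is left untouched. A minimal sketch of the same sequence, assuming git is on PATH and the current directory is the repository; the retag helper and the final push are illustrative, not part of the script above:

    import subprocess

    def retag(tag: str, remote: str = "origin") -> None:
        # Pushing an empty source to ':refs/tags/<tag>' removes the tag from the
        # remote (a harmless failure if it is not there yet); 'git tag -fa' then
        # force-recreates it locally as an annotated tag pointing at HEAD.
        subprocess.run(["git", "push", remote, f":refs/tags/{tag}"], check=False)
        subprocess.run(["git", "tag", "-fa", tag, "-m", f"move {tag}"], check=True)
        # Assumed follow-up (not shown in the hunk above): publish the moved tag.
        subprocess.run(["git", "push", remote, tag], check=True)

    if __name__ == "__main__":
        retag("latest-2.3")  # "latest" itself is deliberately left alone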

View File

@@ -338,7 +338,7 @@ class InvokeAiInstance:
         introduction()
-        from ldm.invoke.config import configure_invokeai
+        from ldm.invoke.config import invokeai_configure
         # NOTE: currently the config script does its own arg parsing! this means the command-line switches
         # from the installer will also automatically propagate down to the config script.
@@ -346,7 +346,7 @@ class InvokeAiInstance:
         # set sys.argv to a consistent state
-        configure_invokeai.main()
+        invokeai_configure.main()
     def install_user_scripts(self):
         """

View File

@@ -1133,8 +1133,8 @@ def report_model_error(opt:Namespace, e:Exception):
         for arg in yes_to_all.split():
             sys.argv.append(arg)
-    from ldm.invoke.config import configure_invokeai
-    configure_invokeai.main()
+    from ldm.invoke.config import invokeai_configure
+    invokeai_configure.main()
     print('** InvokeAI will now restart')
     sys.argv = previous_args
     main() # would rather do a os.exec(), but doesn't exist?
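About the trailing comment: an exec-style restart does exist in the standard library as os.execv (and friends), which replaces the current process image with a fresh interpreter instead of re-entering main() recursively. A small sketch of that alternative, not what the code above actually does:

    import os
    import sys

    def restart_in_place() -> None:
        # Re-run the current script with the same arguments; unlike calling
        # main() again, no Python stack frames from the old run survive.
        python = sys.executable
        os.execv(python, [python] + sys.argv)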

View File

@@ -1 +1 @@
-__version__='2.3.0-rc2'
+__version__='2.3.0-rc3'

View File

@@ -127,8 +127,8 @@ script do it for you. Manual installation is described at:
 https://invoke-ai.github.io/InvokeAI/installation/020_INSTALL_MANUAL/
-You may download the recommended models (about 10GB total), select a customized set, or
-completely skip this step.
+You may download the recommended models (about 15GB total), install all models (40 GB!!)
+select a customized set, or completely skip this step.
 """
 )
 completer.set_options(["recommended", "customized", "skip"])
@@ -433,9 +433,7 @@ def _download_diffusion_weights(
         )
     except OSError as e:
         if str(e).startswith("fp16 is not a valid"):
-            print(
-                f"Could not fetch half-precision version of model {repo_id}; fetching full-precision instead"
-            )
+            pass
         else:
             print(f"An unexpected error occurred while downloading the model: {e})")
     if path:
@@ -866,7 +864,7 @@ def initialize_rootdir(root: str, yes_to_all: bool = False):
     ):
         os.makedirs(os.path.join(root, name), exist_ok=True)
-    configs_src = Path(configs.__path__[-1])
+    configs_src = Path(configs.__path__[0])
     configs_dest = Path(root) / "configs"
     if not os.path.samefile(configs_src, configs_dest):
         shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)
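The configs.__path__[-1] to configs.__path__[0] change leans on the fact that a package's __path__ attribute is a list of directories (normally a single entry; namespace packages may have several), so [0] selects the canonical location. A hedged illustration of the copy-the-bundled-configs idea; my_pkg.configs is a placeholder package name, not the project's real module:

    import shutil
    from pathlib import Path

    def copy_bundled_configs(dest_root: str) -> Path:
        # __path__ lists the filesystem locations of the package; take the
        # first (usually only) entry as the source directory to copy from.
        from my_pkg import configs  # placeholder for the real configs package
        configs_src = Path(configs.__path__[0])
        configs_dest = Path(dest_root) / "configs"
        if not configs_dest.exists() or not configs_dest.samefile(configs_src):
            shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)
        return configs_dest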

View File

@@ -483,12 +483,11 @@ class ModelManager(object):
                     **pipeline_args,
                     **fp_args,
                 )
             except OSError as e:
                 if str(e).startswith('fp16 is not a valid'):
-                    print(f'Could not fetch half-precision version of model {name_or_path}; fetching full-precision instead')
+                    pass
                 else:
-                    print(f'An unexpected error occurred while downloading the model: {e})')
+                    print(f'** An unexpected error occurred while downloading the model: {e})')
             if pipeline:
                 break
@@ -1039,7 +1038,7 @@ class ModelManager(object):
                 vae = AutoencoderKL.from_pretrained(name_or_path, **vae_args, **fp_args)
             except OSError as e:
                 if str(e).startswith('fp16 is not a valid'):
-                    print(' | Half-precision version of model not available; fetching full-precision instead')
+                    pass
                 else:
                     deferred_error = e
             if vae:
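Both hunks keep the same control flow and only drop the warning message: try the half-precision ("fp16") revision first, and if the hub reports that no such revision exists, quietly fall back to full precision. A sketch of that retry pattern with diffusers, assuming the same OSError message prefix the code above checks for; the model id in the usage comment is just an example:

    import torch
    from diffusers import StableDiffusionPipeline

    def download_pipeline(repo_id: str):
        # First attempt: the "fp16" revision of the weights.
        try:
            return StableDiffusionPipeline.from_pretrained(
                repo_id, revision="fp16", torch_dtype=torch.float16
            )
        except OSError as e:
            # A missing fp16 revision is expected for some models; anything
            # else is a genuine download problem and gets reported.
            if not str(e).startswith("fp16 is not a valid"):
                print(f"** An unexpected error occurred while downloading the model: {e}")
                raise
        # Fall back to the default full-precision revision.
        return StableDiffusionPipeline.from_pretrained(repo_id)

    # e.g. download_pipeline("runwayml/stable-diffusion-v1-5")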

View File

@@ -98,13 +98,13 @@ dependencies = [
 # legacy entrypoints; provided for backwards compatibility
 "invoke.py" = "ldm.invoke.CLI:main"
-"configure_invokeai.py" = "ldm.invoke.config.configure_invokeai:main"
+"configure_invokeai.py" = "ldm.invoke.config.invokeai_configure:main"
 "textual_inversion.py" = "ldm.invoke.training.textual_inversion:main"
 "merge_embeddings.py" = "ldm.invoke.merge_diffusers:main"
 # modern entrypoints
 "invokeai" = "ldm.invoke.CLI:main"
-"invokeai-configure" = "ldm.invoke.config.configure_invokeai:main"
+"invokeai-configure" = "ldm.invoke.config.invokeai_configure:main"
 "invokeai-merge" = "ldm.invoke.merge_diffusers:main" # note name munging
 "invokeai-ti" = "ldm.invoke.training.textual_inversion:main"

View File

@@ -2,8 +2,8 @@
 # Copyright (c) 2022 Lincoln D. Stein (https://github.com/lstein)
 import warnings
-from ldm.invoke.config import configure_invokeai
+from ldm.invoke.config import invokeai_configure
 if __name__ == '__main__':
-    warnings.warn("configire_invokeai.py is deprecated, please run 'invoke'", DeprecationWarning)
+    warnings.warn("configure_invokeai.py is deprecated, please run 'invokeai-configure'", DeprecationWarning)
     configure_invokeai.main()