From 23497bf759d5dc1d1e0bb6dea87f9d1a07db8075 Mon Sep 17 00:00:00 2001
From: Damian Stewart
Date: Sun, 30 Jul 2023 20:38:17 +0200
Subject: [PATCH 1/7] add `--ignore_missing_core_models` CLI flag to bypass checking for missing core models

---
 invokeai/app/services/config.py        |  1 +
 invokeai/backend/install/check_root.py | 22 ++++++++++++----------
 2 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/invokeai/app/services/config.py b/invokeai/app/services/config.py
index c119f2f74c..7b5015a670 100644
--- a/invokeai/app/services/config.py
+++ b/invokeai/app/services/config.py
@@ -414,6 +414,7 @@ class InvokeAIAppConfig(InvokeAISettings):
     outdir : Path = Field(default='outputs', description='Default folder for output images', category='Paths')
     from_file : Path = Field(default=None, description='Take command input from the indicated file (command-line client only)', category='Paths')
     use_memory_db : bool = Field(default=False, description='Use in-memory database for storing image metadata', category='Paths')
+    ignore_missing_core_models : bool = Field(default=False, description='Ignore missing models in models/core/convert')
 
     model : str = Field(default='stable-diffusion-1.5', description='Initial model name', category='Models')
 
diff --git a/invokeai/backend/install/check_root.py b/invokeai/backend/install/check_root.py
index ded9e66635..cde53f7100 100644
--- a/invokeai/backend/install/check_root.py
+++ b/invokeai/backend/install/check_root.py
@@ -12,16 +12,17 @@ def check_invokeai_root(config: InvokeAIAppConfig):
         assert config.model_conf_path.exists(), f"{config.model_conf_path} not found"
         assert config.db_path.parent.exists(), f"{config.db_path.parent} not found"
         assert config.models_path.exists(), f"{config.models_path} not found"
-        for model in [
-            "CLIP-ViT-bigG-14-laion2B-39B-b160k",
-            "bert-base-uncased",
-            "clip-vit-large-patch14",
-            "sd-vae-ft-mse",
-            "stable-diffusion-2-clip",
-            "stable-diffusion-safety-checker",
-        ]:
-            path = config.models_path / f"core/convert/{model}"
-            assert path.exists(), f"{path} is missing"
+        if not config.ignore_missing_core_models:
+            for model in [
+                "CLIP-ViT-bigG-14-laion2B-39B-b160k",
+                "bert-base-uncased",
+                "clip-vit-large-patch14",
+                "sd-vae-ft-mse",
+                "stable-diffusion-2-clip",
+                "stable-diffusion-safety-checker",
+            ]:
+                path = config.models_path / f"core/convert/{model}"
+                assert path.exists(), f"{path} is missing"
     except Exception as e:
         print()
         print(f"An exception has occurred: {str(e)}")
@@ -32,5 +33,6 @@ def check_invokeai_root(config: InvokeAIAppConfig):
         print(
             '** From the command line, activate the virtual environment and run "invokeai-configure --yes --skip-sd-weights" **'
         )
+    print('** (To skip this check completely, add "--ignore_missing_core_models" to your CLI args. Not recommended.)')
         input("Press any key to continue...")
         sys.exit(0)

From 8607d124c5935ffb82253152368f7deafaa15ab5 Mon Sep 17 00:00:00 2001
From: Damian Stewart
Date: Wed, 2 Aug 2023 02:31:37 +0200
Subject: [PATCH 2/7] improve message about the consequences of the --ignore_missing_core_models flag

---
 invokeai/backend/install/check_root.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/invokeai/backend/install/check_root.py b/invokeai/backend/install/check_root.py
index cde53f7100..a9887a5025 100644
--- a/invokeai/backend/install/check_root.py
+++ b/invokeai/backend/install/check_root.py
@@ -33,6 +33,8 @@ def check_invokeai_root(config: InvokeAIAppConfig):
         print(
             '** From the command line, activate the virtual environment and run "invokeai-configure --yes --skip-sd-weights" **'
         )
-    print('** (To skip this check completely, add "--ignore_missing_core_models" to your CLI args. Not recommended.)')
+    print('** (To skip this check completely, add "--ignore_missing_core_models" to your CLI args. Not installing '
+        'these core models will prevent the loading of some or all .safetensors and .ckpt files. Howeer, you can '
+        'always come back and install these core models in the future.)')
         input("Press any key to continue...")
         sys.exit(0)

From 6d7223238fa44a39ab738223a924697b9cffbc4c Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Thu, 3 Aug 2023 09:57:00 +1000
Subject: [PATCH 3/7] fix: fix typo in message

---
 invokeai/backend/install/check_root.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/invokeai/backend/install/check_root.py b/invokeai/backend/install/check_root.py
index a9887a5025..8990903a33 100644
--- a/invokeai/backend/install/check_root.py
+++ b/invokeai/backend/install/check_root.py
@@ -34,7 +34,7 @@ def check_invokeai_root(config: InvokeAIAppConfig):
             '** From the command line, activate the virtual environment and run "invokeai-configure --yes --skip-sd-weights" **'
         )
     print('** (To skip this check completely, add "--ignore_missing_core_models" to your CLI args. Not installing '
-        'these core models will prevent the loading of some or all .safetensors and .ckpt files. Howeer, you can '
+        'these core models will prevent the loading of some or all .safetensors and .ckpt files. However, you can '
         'always come back and install these core models in the future.)')
         input("Press any key to continue...")
         sys.exit(0)

From eb6c317f04e93ee4dba34e0392eb38a28376d6ad Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Thu, 3 Aug 2023 09:59:51 +1000
Subject: [PATCH 4/7] chore: black

---
 invokeai/backend/install/check_root.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/invokeai/backend/install/check_root.py b/invokeai/backend/install/check_root.py
index 8990903a33..2104c3a4c7 100644
--- a/invokeai/backend/install/check_root.py
+++ b/invokeai/backend/install/check_root.py
@@ -33,8 +33,10 @@ def check_invokeai_root(config: InvokeAIAppConfig):
         print(
             '** From the command line, activate the virtual environment and run "invokeai-configure --yes --skip-sd-weights" **'
         )
-    print('** (To skip this check completely, add "--ignore_missing_core_models" to your CLI args. Not installing '
-        'these core models will prevent the loading of some or all .safetensors and .ckpt files. However, you can '
-        'always come back and install these core models in the future.)')
+    print(
+        '** (To skip this check completely, add "--ignore_missing_core_models" to your CLI args. Not installing '
+        "these core models will prevent the loading of some or all .safetensors and .ckpt files. However, you can "
+        "always come back and install these core models in the future.)"
+    )
         input("Press any key to continue...")
         sys.exit(0)

From d162b787679c419870e1b8f1d9cc86ec7df2ad84 Mon Sep 17 00:00:00 2001
From: Lincoln Stein
Date: Thu, 3 Aug 2023 22:41:05 -0400
Subject: [PATCH 5/7] fix broken civitai example link

---
 docs/installation/050_INSTALLING_MODELS.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/installation/050_INSTALLING_MODELS.md b/docs/installation/050_INSTALLING_MODELS.md
index ce2004e841..d1c0cfacca 100644
--- a/docs/installation/050_INSTALLING_MODELS.md
+++ b/docs/installation/050_INSTALLING_MODELS.md
@@ -124,7 +124,7 @@ installation. Examples:
 invokeai-model-install --list controlnet
 
 # (install the model at the indicated URL)
-invokeai-model-install --add http://civitai.com/2860
+invokeai-model-install --add https://civitai.com/api/download/models/128713
 
 # (delete the named model)
 invokeai-model-install --delete sd-1/main/analog-diffusion
@@ -170,4 +170,4 @@ elsewhere on disk and they will be autoimported. You can also create subfolders
 and organize them as you wish.
 
 The location of the autoimport directories are controlled by settings
-in `invokeai.yaml`. See [Configuration](../features/CONFIGURATION.md).
\ No newline at end of file
+in `invokeai.yaml`. See [Configuration](../features/CONFIGURATION.md).

From ecabfc252b3625a1c133272166741bbad017245a Mon Sep 17 00:00:00 2001
From: gogurtenjoyer <36354352+gogurtenjoyer@users.noreply.github.com>
Date: Fri, 21 Jul 2023 19:59:22 -0400
Subject: [PATCH 6/7] devices.py - Update MPS FP16 check to account for upcoming MacOS Sonoma

float16 doesn't seem to work on MacOS Sonoma due to further changes with Metal. This'll default back to float32 for Sonoma users.
--- invokeai/backend/util/devices.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/invokeai/backend/util/devices.py b/invokeai/backend/util/devices.py index eeabcc35db..2acd5e4834 100644 --- a/invokeai/backend/util/devices.py +++ b/invokeai/backend/util/devices.py @@ -1,6 +1,8 @@ from __future__ import annotations from contextlib import nullcontext +from packaging import version +import platform import torch from torch import autocast @@ -30,7 +32,7 @@ def choose_precision(device: torch.device) -> str: device_name = torch.cuda.get_device_name(device) if not ("GeForce GTX 1660" in device_name or "GeForce GTX 1650" in device_name): return "float16" - elif device.type == "mps": + elif device.type == "mps" and version.parse(platform.mac_ver()[0]) < version.parse('14.0.0'): return "float16" return "float32" From b6e369c74581e36dadf478f337ec76558f892cfb Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Sat, 5 Aug 2023 12:21:35 +1000 Subject: [PATCH 7/7] chore: black --- invokeai/backend/util/devices.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/invokeai/backend/util/devices.py b/invokeai/backend/util/devices.py index 2acd5e4834..1827f295e4 100644 --- a/invokeai/backend/util/devices.py +++ b/invokeai/backend/util/devices.py @@ -32,7 +32,7 @@ def choose_precision(device: torch.device) -> str: device_name = torch.cuda.get_device_name(device) if not ("GeForce GTX 1660" in device_name or "GeForce GTX 1650" in device_name): return "float16" - elif device.type == "mps" and version.parse(platform.mac_ver()[0]) < version.parse('14.0.0'): + elif device.type == "mps" and version.parse(platform.mac_ver()[0]) < version.parse("14.0.0"): return "float16" return "float32"
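
A usage sketch for the flag introduced in PATCH 1/7 (illustrative only, not part of the patch series). It assumes the usual `InvokeAIAppConfig.get_config()` / `parse_args()` accessors expose the new field as a CLI argument in the same way as the existing settings:

```python
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.install.check_root import check_invokeai_root

# Parse CLI arguments into the app config; "--ignore_missing_core_models"
# sets the boolean field added to InvokeAIAppConfig in PATCH 1/7.
config = InvokeAIAppConfig.get_config()
config.parse_args(["--ignore_missing_core_models"])

# With the flag set, check_invokeai_root() only verifies the root paths and
# database location; the per-model assertions for models/core/convert are skipped.
check_invokeai_root(config)
```

As the warning text added in PATCH 2/7 notes, skipping the check means some or all .safetensors and .ckpt files may fail to load until the core models are installed.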
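
And a minimal sketch of the macOS version gate from PATCH 6/7 and 7/7, assuming `choose_precision` is importable as shown in the diff. `platform.mac_ver()[0]` is only consulted when an MPS device is actually available:

```python
import platform

import torch
from packaging import version

from invokeai.backend.util.devices import choose_precision

if torch.backends.mps.is_available():
    # Mirror the gate added in devices.py: float16 on MPS only below macOS 14
    # (Sonoma); Sonoma and later fall back to float32.
    mac_release = version.parse(platform.mac_ver()[0])
    expected = "float16" if mac_release < version.parse("14.0.0") else "float32"
    assert choose_precision(torch.device("mps")) == expected
```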