Merge branch 'main' into refactor/model_manager_instantiate

Kevin Turner · 2023-08-04 21:34:38 -07:00 · committed by GitHub

4 changed files with 22 additions and 13 deletions


@@ -124,7 +124,7 @@ installation. Examples:
 invokeai-model-install --list controlnet
 # (install the model at the indicated URL)
-invokeai-model-install --add http://civitai.com/2860
+invokeai-model-install --add https://civitai.com/api/download/models/128713
 # (delete the named model)
 invokeai-model-install --delete sd-1/main/analog-diffusion
@@ -170,4 +170,4 @@ elsewhere on disk and they will be autoimported. You can also create
 subfolders and organize them as you wish.
 The location of the autoimport directories are controlled by settings
 in `invokeai.yaml`. See [Configuration](../features/CONFIGURATION.md).
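Note: the autoimport locations mentioned here are fields on the same `InvokeAIAppConfig` class modified below. A minimal sketch of reading them, assuming the era's `autoimport_dir` field name and the `get_config()` singleton accessor (neither is shown in this diff):

```python
# Hypothetical sketch -- `autoimport_dir` and `get_config()` are assumptions
# drawn from the surrounding InvokeAI 3.x config, not part of this diff.
from invokeai.app.services.config import InvokeAIAppConfig

config = InvokeAIAppConfig.get_config()  # loads settings from invokeai.yaml
print(config.root_path)                  # runtime root that relative paths resolve against
print(config.autoimport_dir)             # directory scanned for dropped-in models
```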


@@ -414,6 +414,7 @@ class InvokeAIAppConfig(InvokeAISettings):
     outdir : Path = Field(default='outputs', description='Default folder for output images', category='Paths')
     from_file : Path = Field(default=None, description='Take command input from the indicated file (command-line client only)', category='Paths')
     use_memory_db : bool = Field(default=False, description='Use in-memory database for storing image metadata', category='Paths')
+    ignore_missing_core_models : bool = Field(default=False, description='Ignore missing models in models/core/convert')
     model : str = Field(default='stable-diffusion-1.5', description='Initial model name', category='Models')
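The new `ignore_missing_core_models` field rides on the usual pydantic settings machinery, so it can also be set programmatically, not just via the CLI flag referenced in the next file. A small sketch, assuming standard pydantic keyword initialization:

```python
# Sketch only: direct keyword initialization is standard pydantic behavior;
# the --ignore_missing_core_models CLI form is confirmed by the diff below.
from invokeai.app.services.config import InvokeAIAppConfig

config = InvokeAIAppConfig(ignore_missing_core_models=True)
assert config.ignore_missing_core_models is True
```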


@@ -12,16 +12,17 @@ def check_invokeai_root(config: InvokeAIAppConfig):
         assert config.model_conf_path.exists(), f"{config.model_conf_path} not found"
         assert config.db_path.parent.exists(), f"{config.db_path.parent} not found"
         assert config.models_path.exists(), f"{config.models_path} not found"
-        for model in [
-            "CLIP-ViT-bigG-14-laion2B-39B-b160k",
-            "bert-base-uncased",
-            "clip-vit-large-patch14",
-            "sd-vae-ft-mse",
-            "stable-diffusion-2-clip",
-            "stable-diffusion-safety-checker",
-        ]:
-            path = config.models_path / f"core/convert/{model}"
-            assert path.exists(), f"{path} is missing"
+        if not config.ignore_missing_core_models:
+            for model in [
+                "CLIP-ViT-bigG-14-laion2B-39B-b160k",
+                "bert-base-uncased",
+                "clip-vit-large-patch14",
+                "sd-vae-ft-mse",
+                "stable-diffusion-2-clip",
+                "stable-diffusion-safety-checker",
+            ]:
+                path = config.models_path / f"core/convert/{model}"
+                assert path.exists(), f"{path} is missing"
     except Exception as e:
         print()
         print(f"An exception has occurred: {str(e)}")
@@ -32,5 +33,10 @@ def check_invokeai_root(config: InvokeAIAppConfig):
         print(
             '** From the command line, activate the virtual environment and run "invokeai-configure --yes --skip-sd-weights" **'
         )
+        print(
+            '** (To skip this check completely, add "--ignore_missing_core_models" to your CLI args. Not installing '
+            "these core models will prevent the loading of some or all .safetensors and .ckpt files. However, you can "
+            "always come back and install these core models in the future.)"
+        )
         input("Press any key to continue...")
         sys.exit(0)
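With the guard in place, a root directory that lacks `models/core/convert` now passes the startup check whenever the flag is set. A hypothetical call sequence (the `check_root` module path is an assumption; the flag's effect is taken from the diff above):

```python
# Hypothetical usage -- the invokeai.backend.install.check_root module path is
# an assumption; skipping the core-model asserts is what the diff above adds.
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.install.check_root import check_invokeai_root

config = InvokeAIAppConfig.get_config()
config.ignore_missing_core_models = True
check_invokeai_root(config)  # core/convert assertions are now skipped
```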


@@ -1,6 +1,8 @@
 from __future__ import annotations

 from contextlib import nullcontext
+from packaging import version
+import platform

 import torch
 from torch import autocast
@@ -30,7 +32,7 @@ def choose_precision(device: torch.device) -> str:
         device_name = torch.cuda.get_device_name(device)
         if not ("GeForce GTX 1660" in device_name or "GeForce GTX 1650" in device_name):
             return "float16"
-    elif device.type == "mps":
+    elif device.type == "mps" and version.parse(platform.mac_ver()[0]) < version.parse("14.0.0"):
         return "float16"
     return "float32"