merge with main; fix SDXL repo_ids

Lincoln Stein
2023-07-26 17:38:06 -04:00
143 changed files with 5963 additions and 2662 deletions

View File

@@ -0,0 +1,31 @@
"""
Check that the invokeai_root is correctly configured and exit if not.
"""
import sys
from invokeai.app.services.config import (
InvokeAIAppConfig,
)
def check_invokeai_root(config: InvokeAIAppConfig):
try:
assert config.model_conf_path.exists()
assert config.db_path.exists()
assert config.models_path.exists()
for model in [
'CLIP-ViT-bigG-14-laion2B-39B-b160k',
'bert-base-uncased',
'clip-vit-large-patch14',
'sd-vae-ft-mse',
'stable-diffusion-2-clip',
'stable-diffusion-safety-checker']:
assert (config.models_path / f'core/convert/{model}').exists()
except:
print()
print('== STARTUP ABORTED ==')
print('** One or more necessary files is missing from your InvokeAI root directory **')
print('** Please rerun the configuration script to fix this problem. **')
print('** From the launcher, selection option [7]. **')
print('** From the command line, activate the virtual environment and run "invokeai-configure --yes --skip-sd-weights" **')
input('Press any key to continue...')
sys.exit(0)
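
A minimal sketch of a call site for this check (the module path of the new file and the call site itself are assumptions for illustration, not part of this diff):

    from invokeai.app.services.config import InvokeAIAppConfig
    from invokeai.backend.install.check_root import check_invokeai_root  # assumed module path

    config = InvokeAIAppConfig.get_config()
    check_invokeai_root(config)  # prints guidance and exits if core files are missing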

View File

@@ -32,6 +32,7 @@ from omegaconf import OmegaConf
from tqdm import tqdm
from transformers import (
    CLIPTextModel,
    CLIPTextConfig,
    CLIPTokenizer,
    AutoFeatureExtractor,
    BertTokenizerFast,
@@ -56,6 +57,7 @@ from invokeai.frontend.install.widgets import (
from invokeai.backend.install.legacy_arg_parsing import legacy_parser
from invokeai.backend.install.model_install_backend import (
    hf_download_from_pretrained,
    hf_download_with_resume,
    InstallSelections,
    ModelInstall,
)
@@ -205,6 +207,15 @@ def download_conversion_models():
        pipeline = CLIPTextModel.from_pretrained(repo_id, subfolder="text_encoder", **kwargs)
        pipeline.save_pretrained(target_dir / 'stable-diffusion-2-clip' / 'text_encoder', safe_serialization=True)

        # sd-xl - tokenizer_2
        repo_id = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"
        _, model_name = repo_id.split('/')
        pipeline = CLIPTokenizer.from_pretrained(repo_id, **kwargs)
        pipeline.save_pretrained(target_dir / model_name, safe_serialization=True)

        pipeline = CLIPTextConfig.from_pretrained(repo_id, **kwargs)
        pipeline.save_pretrained(target_dir / model_name, safe_serialization=True)

        # VAE
        logger.info('Downloading stable diffusion VAE')
        vae = AutoencoderKL.from_pretrained('stabilityai/sd-vae-ft-mse', **kwargs)
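
As an aside, these converted SDXL tokenizer_2 files can later be reloaded from the local cache; a minimal sketch (the directory root follows the core/convert layout checked by check_invokeai_root above, and the snippet is illustrative, not part of the diff):

    from pathlib import Path
    from transformers import CLIPTextConfig, CLIPTokenizer

    target_dir = Path('models/core/convert')  # assumed: models_path / 'core/convert'
    local_dir = target_dir / 'CLIP-ViT-bigG-14-laion2B-39B-b160k'
    tokenizer_2 = CLIPTokenizer.from_pretrained(local_dir)  # reads the saved tokenizer files
    config_2 = CLIPTextConfig.from_pretrained(local_dir)    # reads the saved config.json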
@@ -288,13 +299,6 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
color="CONTROL",
)
self.nextrely += 1
self.nsfw_checker = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Activate the NSFW checker to blur images showing potential sexual imagery",
value=old_opts.nsfw_checker,
scroll_exit=True,
)
self.nextrely += 1
label = """HuggingFace access token (OPTIONAL) for automatic model downloads. See https://huggingface.co/settings/tokens."""
for line in textwrap.wrap(label,width=window_width-6):
@@ -390,7 +394,7 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
            must_exist=False,
            use_two_lines=False,
            labelColor="GOOD",
            begin_entry_at=68,
            begin_entry_at=40,
            max_height=3,
            scroll_exit=True,
        )
@@ -475,7 +479,6 @@ https://huggingface.co/spaces/CompVis/stable-diffusion-license
        for attr in [
            "outdir",
            "nsfw_checker",
            "free_gpu_mem",
            "max_cache_size",
            "xformers_enabled",
@@ -519,7 +522,7 @@ class EditOptApplication(npyscreen.NPSAppManaged):
            addModelsForm,
            name="Install Stable Diffusion Models",
            multipage=True,
            cycle_widgets=True,
            cycle_widgets=False,
        )

    def new_opts(self):
@@ -533,8 +536,6 @@ def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Namespace:
def default_startup_options(init_file: Path) -> Namespace:
    opts = InvokeAIAppConfig.get_config()
    if not init_file.exists():
        opts.nsfw_checker = True
    return opts

def default_user_selections(program_opts: Namespace) -> InstallSelections:
@@ -557,7 +558,7 @@ def default_user_selections(program_opts: Namespace) -> InstallSelections:
# -------------------------------------
def initialize_rootdir(root: Path, yes_to_all: bool = False):
    logger.info("** INITIALIZING INVOKEAI RUNTIME DIRECTORY **")
    logger.info("Initializing InvokeAI runtime directory")
    for name in (
        "models",
        "databases",
@@ -658,7 +659,6 @@ def migrate_init_file(legacy_format:Path):
    # a few places where the field names have changed and we have to
    # manually add in the new names/values
    new.nsfw_checker = old.safety_checker
    new.xformers_enabled = old.xformers
    new.conf_path = old.conf
    new.root = legacy_format.parent.resolve()
@@ -767,8 +767,8 @@ def main():
        if migrate_if_needed(opt, config.root_path):
            sys.exit(0)

        if not config.model_conf_path.exists():
            initialize_rootdir(config.root_path, opt.yes_to_all)
        # run this unconditionally in case new directories need to be added
        initialize_rootdir(config.root_path, opt.yes_to_all)

        models_to_download = default_user_selections(opt)
        new_init_file = config.root_path / 'invokeai.yaml'
@@ -788,15 +788,14 @@ def main():
            sys.exit(0)

        if opt.skip_support_models:
            logger.info("SKIPPING SUPPORT MODEL DOWNLOADS PER USER REQUEST")
            logger.info("Skipping support models at user's request")
        else:
            logger.info("CHECKING/UPDATING SUPPORT MODELS")
            logger.info("Installing support models")
            download_support_models()

        if opt.skip_sd_weights:
            logger.warning("SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST")
            logger.warning("Skipping diffusion weights download per user request")
        elif models_to_download:
            logger.info("DOWNLOADING DIFFUSION WEIGHTS")
            process_and_execute(opt, models_to_download)

        postscript(errors=errors)

View File

@@ -58,7 +58,15 @@ LEGACY_CONFIGS = {
            SchedulerPredictionType.Epsilon: 'v2-inpainting-inference.yaml',
            SchedulerPredictionType.VPrediction: 'v2-inpainting-inference-v.yaml',
        }
    }
    },
    BaseModelType.StableDiffusionXL: {
        ModelVariantType.Normal: 'sd_xl_base.yaml',
    },
    BaseModelType.StableDiffusionXLRefiner: {
        ModelVariantType.Normal: 'sd_xl_refiner.yaml',
    },
}
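
With the new entries, the legacy-config lookup for SDXL models is a plain two-level access; for example (illustrative):

    yaml_name = LEGACY_CONFIGS[BaseModelType.StableDiffusionXL][ModelVariantType.Normal]
    # yaml_name == 'sd_xl_base.yaml'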
@dataclass
@@ -141,16 +149,17 @@ class ModelInstall(object):
        for i in installed:
            print(f"{i['model_name']}\t{i['base_model']}\t{i['path']}")

    def starter_models(self)->Set[str]:
    # logic here is a little reversed to maintain backward compatibility
    def starter_models(self, all_models: bool=False)->Set[str]:
        models = set()
        for key, value in self.datasets.items():
            name, base, model_type = ModelManager.parse_key(key)
            if model_type==ModelType.Main:
            if all_models or model_type==ModelType.Main:
                models.add(key)
        return models

    def recommended_models(self)->Set[str]:
        starters = self.starter_models()
        starters = self.starter_models(all_models=True)
        return set([x for x in starters if self.datasets[x].get('recommended', False)])

    def default_model(self)->str:
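
A quick sketch of what the new all_models flag changes, assuming an already-constructed installer (the construction line is hypothetical):

    installer = ModelInstall(config)  # hypothetical setup with a valid InvokeAIAppConfig
    main_only = installer.starter_models()                  # Main models only, as before
    everything = installer.starter_models(all_models=True)  # every dataset entry
    recommended = installer.recommended_models()            # now filtered across all model types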
@@ -329,6 +338,7 @@ class ModelInstall(object):
            description = str(description),
            model_format = info.format,
        )
        legacy_conf = None
        if info.model_type == ModelType.Main:
            attributes.update(dict(variant = info.variant_type,))
            if info.format=="checkpoint":
@@ -343,11 +353,17 @@ class ModelInstall(object):
                except KeyError:
                    legacy_conf = Path(self.config.legacy_conf_dir, 'v1-inference.yaml')  # best guess
                attributes.update(
                    dict(
                        config = str(legacy_conf)
                    )
                )

        if info.model_type == ModelType.ControlNet and info.format=="checkpoint":
            possible_conf = path.with_suffix('.yaml')
            if possible_conf.exists():
                legacy_conf = str(self.relative_to_root(possible_conf))

        if legacy_conf:
            attributes.update(
                dict(
                    config = str(legacy_conf)
                )
            )
        return attributes

    def relative_to_root(self, path: Path)->Path:
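
To make the new ControlNet branch concrete: for a hypothetical checkpoint at models/controlnet/canny.ckpt with a sibling canny.yaml next to it, the method would now return attributes roughly like the following (values are illustrative):

    attributes = {
        'path': 'controlnet/canny.ckpt',
        'description': 'canny',
        'model_format': 'checkpoint',
        'config': 'controlnet/canny.yaml',  # discovered via path.with_suffix('.yaml')
    }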