Apply black

Martin Kristiansen
2023-07-27 10:54:01 -04:00
parent 2183dba5c5
commit 218b6d0546
148 changed files with 5486 additions and 6296 deletions
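Most of the churn across the 148 files follows a handful of mechanical rules that black applies. The most visible is string-quote normalization: single-quoted strings and f-strings are rewritten with double quotes wherever the change is behavior-preserving. A minimal before/after sketch, mirroring the first hunk below:

# Before black: single-quoted strings and f-strings.
model = 'sd-vae-ft-mse'
print(f'{model} is missing')

# After black: double quotes are the normalized default; runtime behavior is identical.
model = "sd-vae-ft-mse"
print(f"{model} is missing")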

@@ -6,28 +6,31 @@ from invokeai.app.services.config import (
InvokeAIAppConfig,
)
def check_invokeai_root(config: InvokeAIAppConfig):
try:
assert config.model_conf_path.exists(), f'{config.model_conf_path} not found'
assert config.db_path.parent.exists(), f'{config.db_path.parent} not found'
assert config.models_path.exists(), f'{config.models_path} not found'
assert config.model_conf_path.exists(), f"{config.model_conf_path} not found"
assert config.db_path.parent.exists(), f"{config.db_path.parent} not found"
assert config.models_path.exists(), f"{config.models_path} not found"
for model in [
'CLIP-ViT-bigG-14-laion2B-39B-b160k',
'bert-base-uncased',
'clip-vit-large-patch14',
'sd-vae-ft-mse',
'stable-diffusion-2-clip',
'stable-diffusion-safety-checker']:
path = config.models_path / f'core/convert/{model}'
assert path.exists(), f'{path} is missing'
"CLIP-ViT-bigG-14-laion2B-39B-b160k",
"bert-base-uncased",
"clip-vit-large-patch14",
"sd-vae-ft-mse",
"stable-diffusion-2-clip",
"stable-diffusion-safety-checker",
]:
path = config.models_path / f"core/convert/{model}"
assert path.exists(), f"{path} is missing"
except Exception as e:
print()
print(f'An exception has occurred: {str(e)}')
print('== STARTUP ABORTED ==')
print('** One or more necessary files is missing from your InvokeAI root directory **')
print('** Please rerun the configuration script to fix this problem. **')
print('** From the launcher, select option [7]. **')
print('** From the command line, activate the virtual environment and run "invokeai-configure --yes --skip-sd-weights" **')
input('Press any key to continue...')
print(f"An exception has occurred: {str(e)}")
print("== STARTUP ABORTED ==")
print("** One or more necessary files is missing from your InvokeAI root directory **")
print("** Please rerun the configuration script to fix this problem. **")
print("** From the launcher, selection option [7]. **")
print(
'** From the command line, activate the virtual environment and run "invokeai-configure --yes --skip-sd-weights" **'
)
input("Press any key to continue...")
sys.exit(0)
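The exploded model list in the hunk above also shows black's "magic trailing comma": when a collection cannot fit on one line, black puts one element per line and appends a trailing comma, and a trailing comma that is already present pins the multi-line layout even when the collection would otherwise fit. A short sketch under black's default settings:

# No trailing comma and short enough: black keeps the literal on one line.
models = ["sd-vae-ft-mse", "stable-diffusion-2-clip"]

# A trailing comma after the last element forces one element per line:
models = [
    "sd-vae-ft-mse",
    "stable-diffusion-2-clip",
]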

@@ -60,9 +60,7 @@ from invokeai.backend.install.model_install_backend import (
InstallSelections,
ModelInstall,
)
from invokeai.backend.model_management.model_probe import (
ModelType, BaseModelType
)
from invokeai.backend.model_management.model_probe import ModelType, BaseModelType
warnings.filterwarnings("ignore")
transformers.logging.set_verbosity_error()
@@ -77,7 +75,7 @@ Model_dir = "models"
Default_config_file = config.model_conf_path
SD_Configs = config.legacy_conf_path
PRECISION_CHOICES = ['auto','float16','float32']
PRECISION_CHOICES = ["auto", "float16", "float32"]
INIT_FILE_PREAMBLE = """# InvokeAI initialization file
# This is the InvokeAI initialization file, which contains command-line default values.
@@ -85,7 +83,8 @@ INIT_FILE_PREAMBLE = """# InvokeAI initialization file
# or renaming it and then running invokeai-configure again.
"""
logger=InvokeAILogger.getLogger()
logger = InvokeAILogger.getLogger()
# --------------------------------------------
def postscript(errors: None):
@@ -108,7 +107,9 @@ Add the '--help' argument to see all of the command-line switches available for
"""
else:
message = "\n** There were errors during installation. It is possible some of the models were not fully downloaded.\n"
message = (
"\n** There were errors during installation. It is possible some of the models were not fully downloaded.\n"
)
for err in errors:
message += f"\t - {err}\n"
message += "Please check the logs above and correct any issues."
@@ -169,9 +170,7 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
logger.info(f"Installing {label} model file {model_url}...")
if not os.path.exists(model_dest):
os.makedirs(os.path.dirname(model_dest), exist_ok=True)
request.urlretrieve(
model_url, model_dest, ProgressBar(os.path.basename(model_dest))
)
request.urlretrieve(model_url, model_dest, ProgressBar(os.path.basename(model_dest)))
logger.info("...downloaded successfully")
else:
logger.info("...exists")
@@ -182,90 +181,93 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
def download_conversion_models():
target_dir = config.root_path / 'models/core/convert'
target_dir = config.root_path / "models/core/convert"
kwargs = dict() # for future use
try:
logger.info('Downloading core tokenizers and text encoders')
logger.info("Downloading core tokenizers and text encoders")
# bert
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
bert = BertTokenizerFast.from_pretrained("bert-base-uncased", **kwargs)
bert.save_pretrained(target_dir / 'bert-base-uncased', safe_serialization=True)
bert.save_pretrained(target_dir / "bert-base-uncased", safe_serialization=True)
# sd-1
repo_id = 'openai/clip-vit-large-patch14'
hf_download_from_pretrained(CLIPTokenizer, repo_id, target_dir / 'clip-vit-large-patch14')
hf_download_from_pretrained(CLIPTextModel, repo_id, target_dir / 'clip-vit-large-patch14')
repo_id = "openai/clip-vit-large-patch14"
hf_download_from_pretrained(CLIPTokenizer, repo_id, target_dir / "clip-vit-large-patch14")
hf_download_from_pretrained(CLIPTextModel, repo_id, target_dir / "clip-vit-large-patch14")
# sd-2
repo_id = "stabilityai/stable-diffusion-2"
pipeline = CLIPTokenizer.from_pretrained(repo_id, subfolder="tokenizer", **kwargs)
pipeline.save_pretrained(target_dir / 'stable-diffusion-2-clip' / 'tokenizer', safe_serialization=True)
pipeline.save_pretrained(target_dir / "stable-diffusion-2-clip" / "tokenizer", safe_serialization=True)
pipeline = CLIPTextModel.from_pretrained(repo_id, subfolder="text_encoder", **kwargs)
pipeline.save_pretrained(target_dir / 'stable-diffusion-2-clip' / 'text_encoder', safe_serialization=True)
pipeline.save_pretrained(target_dir / "stable-diffusion-2-clip" / "text_encoder", safe_serialization=True)
# sd-xl - tokenizer_2
repo_id = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"
_, model_name = repo_id.split('/')
_, model_name = repo_id.split("/")
pipeline = CLIPTokenizer.from_pretrained(repo_id, **kwargs)
pipeline.save_pretrained(target_dir / model_name, safe_serialization=True)
pipeline = CLIPTextConfig.from_pretrained(repo_id, **kwargs)
pipeline.save_pretrained(target_dir / model_name, safe_serialization=True)
# VAE
logger.info('Downloading stable diffusion VAE')
vae = AutoencoderKL.from_pretrained('stabilityai/sd-vae-ft-mse', **kwargs)
vae.save_pretrained(target_dir / 'sd-vae-ft-mse', safe_serialization=True)
logger.info("Downloading stable diffusion VAE")
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", **kwargs)
vae.save_pretrained(target_dir / "sd-vae-ft-mse", safe_serialization=True)
# safety checking
logger.info('Downloading safety checker')
logger.info("Downloading safety checker")
repo_id = "CompVis/stable-diffusion-safety-checker"
pipeline = AutoFeatureExtractor.from_pretrained(repo_id,**kwargs)
pipeline.save_pretrained(target_dir / 'stable-diffusion-safety-checker', safe_serialization=True)
pipeline = AutoFeatureExtractor.from_pretrained(repo_id, **kwargs)
pipeline.save_pretrained(target_dir / "stable-diffusion-safety-checker", safe_serialization=True)
pipeline = StableDiffusionSafetyChecker.from_pretrained(repo_id,**kwargs)
pipeline.save_pretrained(target_dir / 'stable-diffusion-safety-checker', safe_serialization=True)
pipeline = StableDiffusionSafetyChecker.from_pretrained(repo_id, **kwargs)
pipeline.save_pretrained(target_dir / "stable-diffusion-safety-checker", safe_serialization=True)
except KeyboardInterrupt:
raise
except Exception as e:
logger.error(str(e))
# ---------------------------------------------
def download_realesrgan():
logger.info("Installing ESRGAN Upscaling models...")
URLs = [
dict(
url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
dest = "core/upscaling/realesrgan/RealESRGAN_x4plus.pth",
description = "RealESRGAN_x4plus.pth",
url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
dest="core/upscaling/realesrgan/RealESRGAN_x4plus.pth",
description="RealESRGAN_x4plus.pth",
),
dict(
url = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
dest = "core/upscaling/realesrgan/RealESRGAN_x4plus_anime_6B.pth",
description = "RealESRGAN_x4plus_anime_6B.pth",
url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
dest="core/upscaling/realesrgan/RealESRGAN_x4plus_anime_6B.pth",
description="RealESRGAN_x4plus_anime_6B.pth",
),
dict(
url= "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
dest= "core/upscaling/realesrgan/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
description = "ESRGAN_SRx4_DF2KOST_official.pth",
url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
dest="core/upscaling/realesrgan/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth",
description="ESRGAN_SRx4_DF2KOST_official.pth",
),
dict(
url= "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
dest= "core/upscaling/realesrgan/RealESRGAN_x2plus.pth",
description = "RealESRGAN_x2plus.pth",
url="https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
dest="core/upscaling/realesrgan/RealESRGAN_x2plus.pth",
description="RealESRGAN_x2plus.pth",
),
]
for model in URLs:
download_with_progress_bar(model['url'], config.models_path / model['dest'], model['description'])
download_with_progress_bar(model["url"], config.models_path / model["dest"], model["description"])
# ---------------------------------------------
def download_support_models():
download_realesrgan()
download_conversion_models()
# -------------------------------------
def get_root(root: str = None) -> str:
if root:
@@ -275,6 +277,7 @@ def get_root(root: str = None) -> str:
else:
return str(config.root_path)
# -------------------------------------
class editOptsForm(CyclingForm, npyscreen.FormMultiPage):
# for responsive resizing - disabled
@@ -283,14 +286,14 @@ class editOptsForm(CyclingForm, npyscreen.FormMultiPage):
def create(self):
program_opts = self.parentApp.program_opts
old_opts = self.parentApp.invokeai_opts
first_time = not (config.root_path / 'invokeai.yaml').exists()
first_time = not (config.root_path / "invokeai.yaml").exists()
access_token = HfFolder.get_token()
window_width, window_height = get_terminal_size()
label = """Configure startup settings. You can come back and change these later.
Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields.
Use cursor arrows to make a checkbox selection, and space to toggle.
"""
for i in textwrap.wrap(label,width=window_width-6):
for i in textwrap.wrap(label, width=window_width - 6):
self.add_widget_intelligent(
npyscreen.FixedText,
value=i,
@@ -300,7 +303,7 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
self.nextrely += 1
label = """HuggingFace access token (OPTIONAL) for automatic model downloads. See https://huggingface.co/settings/tokens."""
for line in textwrap.wrap(label,width=window_width-6):
for line in textwrap.wrap(label, width=window_width - 6):
self.add_widget_intelligent(
npyscreen.FixedText,
value=line,
@@ -343,7 +346,7 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
relx=50,
scroll_exit=True,
)
self.nextrely -=1
self.nextrely -= 1
self.always_use_cpu = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Force CPU to be used on GPU systems",
@@ -351,10 +354,8 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
relx=80,
scroll_exit=True,
)
precision = old_opts.precision or (
"float32" if program_opts.full_precision else "auto"
)
self.nextrely +=1
precision = old_opts.precision or ("float32" if program_opts.full_precision else "auto")
self.nextrely += 1
self.add_widget_intelligent(
npyscreen.TitleFixedText,
name="Floating Point Precision",
@@ -363,10 +364,10 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
color="CONTROL",
scroll_exit=True,
)
self.nextrely -=1
self.nextrely -= 1
self.precision = self.add_widget_intelligent(
SingleSelectColumns,
columns = 3,
columns=3,
name="Precision",
values=PRECISION_CHOICES,
value=PRECISION_CHOICES.index(precision),
@@ -398,25 +399,25 @@ Use cursor arrows to make a checkbox selection, and space to toggle.
scroll_exit=True,
)
self.autoimport_dirs = {}
self.autoimport_dirs['autoimport_dir'] = self.add_widget_intelligent(
FileBox,
name=f'Folder to recursively scan for new checkpoints, ControlNets, LoRAs and TI models',
value=str(config.root_path / config.autoimport_dir),
select_dir=True,
must_exist=False,
use_two_lines=False,
labelColor="GOOD",
begin_entry_at=32,
max_height = 3,
scroll_exit=True
)
self.autoimport_dirs["autoimport_dir"] = self.add_widget_intelligent(
FileBox,
name=f"Folder to recursively scan for new checkpoints, ControlNets, LoRAs and TI models",
value=str(config.root_path / config.autoimport_dir),
select_dir=True,
must_exist=False,
use_two_lines=False,
labelColor="GOOD",
begin_entry_at=32,
max_height=3,
scroll_exit=True,
)
self.nextrely += 1
label = """BY DOWNLOADING THE STABLE DIFFUSION WEIGHT FILES, YOU AGREE TO HAVE READ
AND ACCEPTED THE CREATIVEML RESPONSIBLE AI LICENSES LOCATED AT
https://huggingface.co/spaces/CompVis/stable-diffusion-license and
https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md
"""
for i in textwrap.wrap(label,width=window_width-6):
for i in textwrap.wrap(label, width=window_width - 6):
self.add_widget_intelligent(
npyscreen.FixedText,
value=i,
@@ -431,11 +432,7 @@ https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENS
scroll_exit=True,
)
self.nextrely += 1
label = (
"DONE"
if program_opts.skip_sd_weights or program_opts.default_only
else "NEXT"
)
label = "DONE" if program_opts.skip_sd_weights or program_opts.default_only else "NEXT"
self.ok_button = self.add_widget_intelligent(
CenteredButtonPress,
name=label,
@@ -454,13 +451,11 @@ https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENS
self.editing = False
else:
self.editing = True
def validate_field_values(self, opt: Namespace) -> bool:
bad_fields = []
if not opt.license_acceptance:
bad_fields.append(
"Please accept the license terms before proceeding to model downloads"
)
bad_fields.append("Please accept the license terms before proceeding to model downloads")
if not Path(opt.outdir).parent.exists():
bad_fields.append(
f"The output directory does not seem to be valid. Please check that {str(Path(opt.outdir).parent)} is an existing directory."
@@ -478,11 +473,11 @@ https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENS
new_opts = Namespace()
for attr in [
"outdir",
"free_gpu_mem",
"max_cache_size",
"xformers_enabled",
"always_use_cpu",
"outdir",
"free_gpu_mem",
"max_cache_size",
"xformers_enabled",
"always_use_cpu",
]:
setattr(new_opts, attr, getattr(self, attr).value)
@@ -495,7 +490,7 @@ https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENS
new_opts.hf_token = self.hf_token.value
new_opts.license_acceptance = self.license_acceptance.value
new_opts.precision = PRECISION_CHOICES[self.precision.value[0]]
return new_opts
@@ -534,19 +529,20 @@ def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Nam
editApp.run()
return editApp.new_opts()
def default_startup_options(init_file: Path) -> Namespace:
opts = InvokeAIAppConfig.get_config()
return opts
def default_user_selections(program_opts: Namespace) -> InstallSelections:
try:
installer = ModelInstall(config)
except omegaconf.errors.ConfigKeyError:
logger.warning('Your models.yaml file is corrupt or out of date. Reinitializing')
logger.warning("Your models.yaml file is corrupt or out of date. Reinitializing")
initialize_rootdir(config.root_path, True)
installer = ModelInstall(config)
models = installer.all_models()
return InstallSelections(
install_models=[models[installer.default_model()].path or models[installer.default_model()].repo_id]
@@ -556,55 +552,46 @@ def default_user_selections(program_opts: Namespace) -> InstallSelections:
else list(),
)
# -------------------------------------
def initialize_rootdir(root: Path, yes_to_all: bool = False):
logger.info("Initializing InvokeAI runtime directory")
for name in (
"models",
"databases",
"text-inversion-output",
"text-inversion-training-data",
"configs"
):
for name in ("models", "databases", "text-inversion-output", "text-inversion-training-data", "configs"):
os.makedirs(os.path.join(root, name), exist_ok=True)
for model_type in ModelType:
Path(root, 'autoimport', model_type.value).mkdir(parents=True, exist_ok=True)
Path(root, "autoimport", model_type.value).mkdir(parents=True, exist_ok=True)
configs_src = Path(configs.__path__[0])
configs_dest = root / "configs"
if not os.path.samefile(configs_src, configs_dest):
shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)
dest = root / 'models'
dest = root / "models"
for model_base in BaseModelType:
for model_type in ModelType:
path = dest / model_base.value / model_type.value
path.mkdir(parents=True, exist_ok=True)
path = dest / 'core'
path = dest / "core"
path.mkdir(parents=True, exist_ok=True)
maybe_create_models_yaml(root)
def maybe_create_models_yaml(root: Path):
models_yaml = root / 'configs' / 'models.yaml'
models_yaml = root / "configs" / "models.yaml"
if models_yaml.exists():
if OmegaConf.load(models_yaml).get('__metadata__'): # up to date
if OmegaConf.load(models_yaml).get("__metadata__"): # up to date
return
else:
logger.info('Creating new models.yaml, original saved as models.yaml.orig')
models_yaml.rename(models_yaml.parent / 'models.yaml.orig')
with open(models_yaml,'w') as yaml_file:
yaml_file.write(yaml.dump({'__metadata__':
{'version':'3.0.0'}
}
)
)
logger.info("Creating new models.yaml, original saved as models.yaml.orig")
models_yaml.rename(models_yaml.parent / "models.yaml.orig")
with open(models_yaml, "w") as yaml_file:
yaml_file.write(yaml.dump({"__metadata__": {"version": "3.0.0"}}))
# -------------------------------------
def run_console_ui(
program_opts: Namespace, initfile: Path = None
) -> (Namespace, Namespace):
def run_console_ui(program_opts: Namespace, initfile: Path = None) -> (Namespace, Namespace):
# parse_args() will read from init file if present
invokeai_opts = default_startup_options(initfile)
invokeai_opts.root = program_opts.root
@@ -616,8 +603,9 @@ def run_console_ui(
# the install-models application spawns a subprocess to install
# models, and will crash unless this is set before running.
import torch
torch.multiprocessing.set_start_method("spawn")
editApp = EditOptApplication(program_opts, invokeai_opts)
editApp.run()
if editApp.user_cancelled:
@@ -634,39 +622,42 @@ def write_opts(opts: Namespace, init_file: Path):
# this will load current settings
new_config = InvokeAIAppConfig.get_config()
new_config.root = config.root
for key,value in opts.__dict__.items():
if hasattr(new_config,key):
setattr(new_config,key,value)
with open(init_file,'w', encoding='utf-8') as file:
for key, value in opts.__dict__.items():
if hasattr(new_config, key):
setattr(new_config, key, value)
with open(init_file, "w", encoding="utf-8") as file:
file.write(new_config.to_yaml())
if hasattr(opts,'hf_token') and opts.hf_token:
if hasattr(opts, "hf_token") and opts.hf_token:
HfLogin(opts.hf_token)
# -------------------------------------
def default_output_dir() -> Path:
return config.root_path / "outputs"
# -------------------------------------
def write_default_options(program_opts: Namespace, initfile: Path):
opt = default_startup_options(initfile)
write_opts(opt, initfile)
# -------------------------------------
# Here we bring in
# the legacy Args object in order to parse
# the old init file and write out the new
# yaml format.
def migrate_init_file(legacy_format:Path):
old = legacy_parser.parse_args([f'@{str(legacy_format)}'])
def migrate_init_file(legacy_format: Path):
old = legacy_parser.parse_args([f"@{str(legacy_format)}"])
new = InvokeAIAppConfig.get_config()
fields = list(get_type_hints(InvokeAIAppConfig).keys())
for attr in fields:
if hasattr(old,attr):
setattr(new,attr,getattr(old,attr))
if hasattr(old, attr):
setattr(new, attr, getattr(old, attr))
# a few places where the field names have changed and we have to
# manually add in the new names/values
@@ -674,40 +665,43 @@ def migrate_init_file(legacy_format:Path):
new.conf_path = old.conf
new.root = legacy_format.parent.resolve()
invokeai_yaml = legacy_format.parent / 'invokeai.yaml'
with open(invokeai_yaml,"w", encoding="utf-8") as outfile:
invokeai_yaml = legacy_format.parent / "invokeai.yaml"
with open(invokeai_yaml, "w", encoding="utf-8") as outfile:
outfile.write(new.to_yaml())
legacy_format.replace(legacy_format.parent / 'invokeai.init.orig')
legacy_format.replace(legacy_format.parent / "invokeai.init.orig")
# -------------------------------------
def migrate_models(root: Path):
from invokeai.backend.install.migrate_to_3 import do_migrate
do_migrate(root, root)
def migrate_if_needed(opt: Namespace, root: Path)->bool:
# We check to see whether the runtime directory is correctly initialized.
old_init_file = root / 'invokeai.init'
new_init_file = root / 'invokeai.yaml'
old_hub = root / 'models/hub'
migration_needed = (old_init_file.exists() and not new_init_file.exists()) and old_hub.exists()
if migration_needed:
if opt.yes_to_all or \
yes_or_no(f'{str(config.root_path)} appears to be a 2.3 format root directory. Convert to version 3.0?'):
logger.info('** Migrating invokeai.init to invokeai.yaml')
def migrate_if_needed(opt: Namespace, root: Path) -> bool:
# We check to see whether the runtime directory is correctly initialized.
old_init_file = root / "invokeai.init"
new_init_file = root / "invokeai.yaml"
old_hub = root / "models/hub"
migration_needed = (old_init_file.exists() and not new_init_file.exists()) and old_hub.exists()
if migration_needed:
if opt.yes_to_all or yes_or_no(
f"{str(config.root_path)} appears to be a 2.3 format root directory. Convert to version 3.0?"
):
logger.info("** Migrating invokeai.init to invokeai.yaml")
migrate_init_file(old_init_file)
config.parse_args(argv=[],conf=OmegaConf.load(new_init_file))
config.parse_args(argv=[], conf=OmegaConf.load(new_init_file))
if old_hub.exists():
migrate_models(config.root_path)
else:
print('Cannot continue without conversion. Aborting.')
print("Cannot continue without conversion. Aborting.")
return migration_needed
# -------------------------------------
def main():
parser = argparse.ArgumentParser(description="InvokeAI model downloader")
@@ -764,9 +758,9 @@ def main():
invoke_args = []
if opt.root:
invoke_args.extend(['--root',opt.root])
invoke_args.extend(["--root", opt.root])
if opt.full_precision:
invoke_args.extend(['--precision','float32'])
invoke_args.extend(["--precision", "float32"])
config.parse_args(invoke_args)
logger = InvokeAILogger().getLogger(config=config)
@@ -782,22 +776,18 @@ def main():
initialize_rootdir(config.root_path, opt.yes_to_all)
models_to_download = default_user_selections(opt)
new_init_file = config.root_path / 'invokeai.yaml'
new_init_file = config.root_path / "invokeai.yaml"
if opt.yes_to_all:
write_default_options(opt, new_init_file)
init_options = Namespace(
precision="float32" if opt.full_precision else "float16"
)
init_options = Namespace(precision="float32" if opt.full_precision else "float16")
else:
init_options, models_to_download = run_console_ui(opt, new_init_file)
if init_options:
write_opts(init_options, new_init_file)
else:
logger.info(
'\n** CANCELLED AT USER\'S REQUEST. USE THE "invoke.sh" LAUNCHER TO RUN LATER **\n'
)
logger.info('\n** CANCELLED AT USER\'S REQUEST. USE THE "invoke.sh" LAUNCHER TO RUN LATER **\n')
sys.exit(0)
if opt.skip_support_models:
logger.info("Skipping support models at user's request")
else:
@@ -811,7 +801,7 @@ def main():
postscript(errors=errors)
if not opt.yes_to_all:
input('Press any key to continue...')
input("Press any key to continue...")
except KeyboardInterrupt:
print("\nGoodbye! Come back soon.")

@@ -47,17 +47,18 @@ PRECISION_CHOICES = [
"float16",
]
class FileArgumentParser(ArgumentParser):
"""
Supports reading defaults from an init file.
"""
def convert_arg_line_to_args(self, arg_line):
return shlex.split(arg_line, comments=True)
legacy_parser = FileArgumentParser(
description=
"""
description="""
Generate images using Stable Diffusion.
Use --web to launch the web interface.
Use --from_file to load prompts from a file path or standard input ("-").
@@ -65,304 +66,279 @@ Generate images using Stable Diffusion.
Other command-line arguments are defaults that can usually be overridden
at the command prompt.
""",
fromfile_prefix_chars='@',
fromfile_prefix_chars="@",
)
general_group = legacy_parser.add_argument_group('General')
model_group = legacy_parser.add_argument_group('Model selection')
file_group = legacy_parser.add_argument_group('Input/output')
web_server_group = legacy_parser.add_argument_group('Web server')
render_group = legacy_parser.add_argument_group('Rendering')
postprocessing_group = legacy_parser.add_argument_group('Postprocessing')
deprecated_group = legacy_parser.add_argument_group('Deprecated options')
general_group = legacy_parser.add_argument_group("General")
model_group = legacy_parser.add_argument_group("Model selection")
file_group = legacy_parser.add_argument_group("Input/output")
web_server_group = legacy_parser.add_argument_group("Web server")
render_group = legacy_parser.add_argument_group("Rendering")
postprocessing_group = legacy_parser.add_argument_group("Postprocessing")
deprecated_group = legacy_parser.add_argument_group("Deprecated options")
deprecated_group.add_argument('--laion400m')
deprecated_group.add_argument('--weights') # deprecated
general_group.add_argument(
'--version','-V',
action='store_true',
help='Print InvokeAI version number'
)
deprecated_group.add_argument("--laion400m")
deprecated_group.add_argument("--weights") # deprecated
general_group.add_argument("--version", "-V", action="store_true", help="Print InvokeAI version number")
model_group.add_argument(
'--root_dir',
"--root_dir",
default=None,
help='Path to directory containing "models", "outputs" and "configs". If not present will read from environment variable INVOKEAI_ROOT. Defaults to ~/invokeai.',
)
model_group.add_argument(
'--config',
'-c',
'-config',
dest='conf',
default='./configs/models.yaml',
help='Path to configuration file for alternate models.',
"--config",
"-c",
"-config",
dest="conf",
default="./configs/models.yaml",
help="Path to configuration file for alternate models.",
)
model_group.add_argument(
'--model',
"--model",
help='Indicates which diffusion model to load (defaults to "default" stanza in configs/models.yaml)',
)
model_group.add_argument(
'--weight_dirs',
nargs='+',
"--weight_dirs",
nargs="+",
type=str,
help='List of one or more directories that will be auto-scanned for new model weights to import',
help="List of one or more directories that will be auto-scanned for new model weights to import",
)
model_group.add_argument(
'--png_compression','-z',
"--png_compression",
"-z",
type=int,
default=6,
choices=range(0,9),
dest='png_compression',
help='level of PNG compression, from 0 (none) to 9 (maximum). Default is 6.'
choices=range(0, 9),
dest="png_compression",
help="level of PNG compression, from 0 (none) to 9 (maximum). Default is 6.",
)
model_group.add_argument(
'-F',
'--full_precision',
dest='full_precision',
action='store_true',
help='Deprecated way to set --precision=float32',
"-F",
"--full_precision",
dest="full_precision",
action="store_true",
help="Deprecated way to set --precision=float32",
)
model_group.add_argument(
'--max_loaded_models',
dest='max_loaded_models',
"--max_loaded_models",
dest="max_loaded_models",
type=int,
default=2,
help='Maximum number of models to keep in memory for fast switching, including the one in GPU',
help="Maximum number of models to keep in memory for fast switching, including the one in GPU",
)
model_group.add_argument(
'--free_gpu_mem',
dest='free_gpu_mem',
action='store_true',
help='Force free gpu memory before final decoding',
"--free_gpu_mem",
dest="free_gpu_mem",
action="store_true",
help="Force free gpu memory before final decoding",
)
model_group.add_argument(
'--sequential_guidance',
dest='sequential_guidance',
action='store_true',
help="Calculate guidance in serial instead of in parallel, lowering memory requirement "
"at the expense of speed",
"--sequential_guidance",
dest="sequential_guidance",
action="store_true",
help="Calculate guidance in serial instead of in parallel, lowering memory requirement " "at the expense of speed",
)
model_group.add_argument(
'--xformers',
"--xformers",
action=argparse.BooleanOptionalAction,
default=True,
help='Enable/disable xformers support (default enabled if installed)',
help="Enable/disable xformers support (default enabled if installed)",
)
model_group.add_argument(
"--always_use_cpu",
dest="always_use_cpu",
action="store_true",
help="Force use of CPU even if GPU is available"
"--always_use_cpu", dest="always_use_cpu", action="store_true", help="Force use of CPU even if GPU is available"
)
model_group.add_argument(
'--precision',
dest='precision',
"--precision",
dest="precision",
type=str,
choices=PRECISION_CHOICES,
metavar='PRECISION',
metavar="PRECISION",
help=f'Set model precision. Defaults to auto selected based on device. Options: {", ".join(PRECISION_CHOICES)}',
default='auto',
default="auto",
)
model_group.add_argument(
'--ckpt_convert',
"--ckpt_convert",
action=argparse.BooleanOptionalAction,
dest='ckpt_convert',
dest="ckpt_convert",
default=True,
help='Deprecated option. Legacy ckpt files are now always converted to diffusers when loaded.'
help="Deprecated option. Legacy ckpt files are now always converted to diffusers when loaded.",
)
model_group.add_argument(
'--internet',
"--internet",
action=argparse.BooleanOptionalAction,
dest='internet_available',
dest="internet_available",
default=True,
help='Indicate whether internet is available for just-in-time model downloading (default: probe automatically).',
help="Indicate whether internet is available for just-in-time model downloading (default: probe automatically).",
)
model_group.add_argument(
'--nsfw_checker',
'--safety_checker',
"--nsfw_checker",
"--safety_checker",
action=argparse.BooleanOptionalAction,
dest='safety_checker',
dest="safety_checker",
default=False,
help='Check for and blur potentially NSFW images. Use --no-nsfw_checker to disable.',
help="Check for and blur potentially NSFW images. Use --no-nsfw_checker to disable.",
)
model_group.add_argument(
'--autoimport',
"--autoimport",
default=None,
type=str,
help='Check the indicated directory for .ckpt/.safetensors weights files at startup and import directly',
help="Check the indicated directory for .ckpt/.safetensors weights files at startup and import directly",
)
model_group.add_argument(
'--autoconvert',
"--autoconvert",
default=None,
type=str,
help='Check the indicated directory for .ckpt/.safetensors weights files at startup and import as optimized diffuser models',
help="Check the indicated directory for .ckpt/.safetensors weights files at startup and import as optimized diffuser models",
)
model_group.add_argument(
'--patchmatch',
"--patchmatch",
action=argparse.BooleanOptionalAction,
default=True,
help='Load the patchmatch extension for outpainting. Use --no-patchmatch to disable.',
help="Load the patchmatch extension for outpainting. Use --no-patchmatch to disable.",
)
file_group.add_argument(
'--from_file',
dest='infile',
"--from_file",
dest="infile",
type=str,
help='If specified, load prompts from this file',
help="If specified, load prompts from this file",
)
file_group.add_argument(
'--outdir',
'-o',
"--outdir",
"-o",
type=str,
help='Directory to save generated images and a log of prompts and seeds. Default: ROOTDIR/outputs',
default='outputs',
help="Directory to save generated images and a log of prompts and seeds. Default: ROOTDIR/outputs",
default="outputs",
)
file_group.add_argument(
'--prompt_as_dir',
'-p',
action='store_true',
help='Place images in subdirectories named after the prompt.',
"--prompt_as_dir",
"-p",
action="store_true",
help="Place images in subdirectories named after the prompt.",
)
render_group.add_argument(
'--fnformat',
default='{prefix}.{seed}.png',
"--fnformat",
default="{prefix}.{seed}.png",
type=str,
help='Overwrite the filename format. You can use any argument as wildcard enclosed in curly braces. Default is {prefix}.{seed}.png',
help="Overwrite the filename format. You can use any argument as wildcard enclosed in curly braces. Default is {prefix}.{seed}.png",
)
render_group.add_argument("-s", "--steps", type=int, default=50, help="Number of steps")
render_group.add_argument(
'-s',
'--steps',
"-W",
"--width",
type=int,
default=50,
help='Number of steps'
help="Image width, multiple of 64",
)
render_group.add_argument(
'-W',
'--width',
"-H",
"--height",
type=int,
help='Image width, multiple of 64',
help="Image height, multiple of 64",
)
render_group.add_argument(
'-H',
'--height',
type=int,
help='Image height, multiple of 64',
)
render_group.add_argument(
'-C',
'--cfg_scale',
"-C",
"--cfg_scale",
default=7.5,
type=float,
help='Classifier free guidance (CFG) scale - higher numbers cause generator to "try" harder.',
)
render_group.add_argument(
'--sampler',
'-A',
'-m',
dest='sampler_name',
"--sampler",
"-A",
"-m",
dest="sampler_name",
type=str,
choices=SAMPLER_CHOICES,
metavar='SAMPLER_NAME',
metavar="SAMPLER_NAME",
help=f'Set the default sampler. Supported samplers: {", ".join(SAMPLER_CHOICES)}',
default='k_lms',
default="k_lms",
)
render_group.add_argument(
'--log_tokenization',
'-t',
action='store_true',
help='shows how the prompt is split into tokens'
"--log_tokenization", "-t", action="store_true", help="shows how the prompt is split into tokens"
)
render_group.add_argument(
'-f',
'--strength',
"-f",
"--strength",
type=float,
help='img2img strength for noising/unnoising. 0.0 preserves image exactly, 1.0 replaces it completely',
help="img2img strength for noising/unnoising. 0.0 preserves image exactly, 1.0 replaces it completely",
)
render_group.add_argument(
'-T',
'-fit',
'--fit',
"-T",
"-fit",
"--fit",
action=argparse.BooleanOptionalAction,
help='If specified, will resize the input image to fit within the dimensions of width x height (512x512 default)',
help="If specified, will resize the input image to fit within the dimensions of width x height (512x512 default)",
)
render_group.add_argument("--grid", "-g", action=argparse.BooleanOptionalAction, help="generate a grid")
render_group.add_argument(
'--grid',
'-g',
action=argparse.BooleanOptionalAction,
help='generate a grid'
)
render_group.add_argument(
'--embedding_directory',
'--embedding_path',
dest='embedding_path',
default='embeddings',
"--embedding_directory",
"--embedding_path",
dest="embedding_path",
default="embeddings",
type=str,
help='Path to a directory containing .bin and/or .pt files, or a single .bin/.pt file. You may use subdirectories. (default is ROOTDIR/embeddings)'
help="Path to a directory containing .bin and/or .pt files, or a single .bin/.pt file. You may use subdirectories. (default is ROOTDIR/embeddings)",
)
render_group.add_argument(
'--lora_directory',
dest='lora_path',
default='loras',
"--lora_directory",
dest="lora_path",
default="loras",
type=str,
help='Path to a directory containing LoRA files; subdirectories are not supported. (default is ROOTDIR/loras)'
help="Path to a directory containing LoRA files; subdirectories are not supported. (default is ROOTDIR/loras)",
)
render_group.add_argument(
'--embeddings',
"--embeddings",
action=argparse.BooleanOptionalAction,
default=True,
help='Enable embedding directory (default). Use --no-embeddings to disable.',
help="Enable embedding directory (default). Use --no-embeddings to disable.",
)
render_group.add_argument("--enable_image_debugging", action="store_true", help="Generates debugging image to display")
render_group.add_argument(
'--enable_image_debugging',
action='store_true',
help='Generates debugging image to display'
)
render_group.add_argument(
'--karras_max',
"--karras_max",
type=int,
default=None,
help="control the point at which the K* samplers will shift from using the Karras noise schedule (good for low step counts) to the LatentDiffusion noise schedule (good for high step counts). Set to 0 to use LatentDiffusion for all step values, and to a high value (e.g. 1000) to use Karras for all step values. [29]."
help="control the point at which the K* samplers will shift from using the Karras noise schedule (good for low step counts) to the LatentDiffusion noise schedule (good for high step counts). Set to 0 to use LatentDiffusion for all step values, and to a high value (e.g. 1000) to use Karras for all step values. [29].",
)
# Restoration related args
postprocessing_group.add_argument(
'--no_restore',
dest='restore',
action='store_false',
help='Disable face restoration with GFPGAN or codeformer',
"--no_restore",
dest="restore",
action="store_false",
help="Disable face restoration with GFPGAN or codeformer",
)
postprocessing_group.add_argument(
'--no_upscale',
dest='esrgan',
action='store_false',
help='Disable upscaling with ESRGAN',
"--no_upscale",
dest="esrgan",
action="store_false",
help="Disable upscaling with ESRGAN",
)
postprocessing_group.add_argument(
'--esrgan_bg_tile',
"--esrgan_bg_tile",
type=int,
default=400,
help='Tile size for background sampler, 0 for no tile during testing. Default: 400.',
help="Tile size for background sampler, 0 for no tile during testing. Default: 400.",
)
postprocessing_group.add_argument(
'--esrgan_denoise_str',
"--esrgan_denoise_str",
type=float,
default=0.75,
help='esrgan denoise str. 0 is no denoise, 1 is max denoise. Default: 0.75',
help="esrgan denoise str. 0 is no denoise, 1 is max denoise. Default: 0.75",
)
postprocessing_group.add_argument(
'--gfpgan_model_path',
"--gfpgan_model_path",
type=str,
default='./models/gfpgan/GFPGANv1.4.pth',
help='Indicates the path to the GFPGAN model',
default="./models/gfpgan/GFPGANv1.4.pth",
help="Indicates the path to the GFPGAN model",
)
web_server_group.add_argument(
'--web',
dest='web',
action='store_true',
help='Start in web server mode.',
"--web",
dest="web",
action="store_true",
help="Start in web server mode.",
)
web_server_group.add_argument(
'--web_develop',
dest='web_develop',
action='store_true',
help='Start in web server development mode.',
"--web_develop",
dest="web_develop",
action="store_true",
help="Start in web server development mode.",
)
web_server_group.add_argument(
"--web_verbose",
@@ -376,32 +352,27 @@ web_server_group.add_argument(
help="Additional allowed origins, comma-separated",
)
web_server_group.add_argument(
'--host',
"--host",
type=str,
default='127.0.0.1',
help='Web server: Host or IP to listen on. Set to 0.0.0.0 to accept traffic from other devices on your network.'
default="127.0.0.1",
help="Web server: Host or IP to listen on. Set to 0.0.0.0 to accept traffic from other devices on your network.",
)
web_server_group.add_argument("--port", type=int, default="9090", help="Web server: Port to listen on")
web_server_group.add_argument(
'--port',
type=int,
default='9090',
help='Web server: Port to listen on'
)
web_server_group.add_argument(
'--certfile',
"--certfile",
type=str,
default=None,
help='Web server: Path to certificate file to use for SSL. Use together with --keyfile'
help="Web server: Path to certificate file to use for SSL. Use together with --keyfile",
)
web_server_group.add_argument(
'--keyfile',
"--keyfile",
type=str,
default=None,
help='Web server: Path to private key file to use for SSL. Use together with --certfile'
help="Web server: Path to private key file to use for SSL. Use together with --certfile",
)
web_server_group.add_argument(
'--gui',
dest='gui',
action='store_true',
help='Start InvokeAI GUI',
"--gui",
dest="gui",
action="store_true",
help="Start InvokeAI GUI",
)
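Two more black behaviors are visible in the legacy argument-parser diff above. First, black joins a call onto a single line whenever it fits within the configured line length, which is why short calls such as the --grid and --steps options collapse to one line while longer ones stay exploded; several of the joined lines here exceed black's default limit of 88 columns, so the project presumably configures a larger value in its pyproject.toml (the configuration file is not part of this diff, so the exact limit is an assumption). Second, black preserves implicit string concatenation rather than merging adjacent literals, as in the --sequential_guidance help text. A sketch under those assumptions:

import argparse

parser = argparse.ArgumentParser()
render_group = parser.add_argument_group("Rendering")

# Fits within the (assumed) widened line limit: black joins the call onto one line.
render_group.add_argument("--grid", "-g", action=argparse.BooleanOptionalAction, help="generate a grid")

# Adjacent string literals survive as written; Python concatenates them at compile time.
help_text = "Calculate guidance in serial instead of in parallel, lowering memory requirement " "at the expense of speed"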

@@ -1,7 +1,7 @@
'''
"""
Migrate the models directory and models.yaml file from an existing
InvokeAI 2.3 installation to 3.0.0.
'''
"""
import os
import argparse
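The migration module below also shows black's docstring handling: quote normalization applies to docstrings too, so module and method docstrings written with triple single quotes are rewritten with triple double quotes, content untouched. A before/after sketch (illustrative function names; the real method is MigrateTo3.copy_file):

# Before black:
def copy_file_before(src, dest):
    '''copy a single file with logging'''

# After black: only the quote style changes.
def copy_file_after(src, dest):
    """copy a single file with logging"""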
@@ -29,14 +29,13 @@ from transformers import (
import invokeai.backend.util.logging as logger
from invokeai.app.services.config import InvokeAIAppConfig
from invokeai.backend.model_management import ModelManager
from invokeai.backend.model_management.model_probe import (
ModelProbe, ModelType, BaseModelType, ModelProbeInfo
)
from invokeai.backend.model_management.model_probe import ModelProbe, ModelType, BaseModelType, ModelProbeInfo
warnings.filterwarnings("ignore")
transformers.logging.set_verbosity_error()
diffusers.logging.set_verbosity_error()
# holder for paths that we will migrate
@dataclass
class ModelPaths:
@@ -45,81 +44,82 @@ class ModelPaths:
loras: Path
controlnets: Path
class MigrateTo3(object):
def __init__(self,
from_root: Path,
to_models: Path,
model_manager: ModelManager,
src_paths: ModelPaths,
):
def __init__(
self,
from_root: Path,
to_models: Path,
model_manager: ModelManager,
src_paths: ModelPaths,
):
self.root_directory = from_root
self.dest_models = to_models
self.mgr = model_manager
self.src_paths = src_paths
@classmethod
def initialize_yaml(cls, yaml_file: Path):
with open(yaml_file, 'w') as file:
file.write(
yaml.dump(
{
'__metadata__': {'version':'3.0.0'}
}
)
)
with open(yaml_file, "w") as file:
file.write(yaml.dump({"__metadata__": {"version": "3.0.0"}}))
def create_directory_structure(self):
'''
"""
Create the basic directory structure for the models folder.
'''
for model_base in [BaseModelType.StableDiffusion1,BaseModelType.StableDiffusion2]:
for model_type in [ModelType.Main, ModelType.Vae, ModelType.Lora,
ModelType.ControlNet,ModelType.TextualInversion]:
"""
for model_base in [BaseModelType.StableDiffusion1, BaseModelType.StableDiffusion2]:
for model_type in [
ModelType.Main,
ModelType.Vae,
ModelType.Lora,
ModelType.ControlNet,
ModelType.TextualInversion,
]:
path = self.dest_models / model_base.value / model_type.value
path.mkdir(parents=True, exist_ok=True)
path = self.dest_models / 'core'
path = self.dest_models / "core"
path.mkdir(parents=True, exist_ok=True)
@staticmethod
def copy_file(src:Path,dest:Path):
'''
def copy_file(src: Path, dest: Path):
"""
copy a single file with logging
'''
"""
if dest.exists():
logger.info(f'Skipping existing {str(dest)}')
logger.info(f"Skipping existing {str(dest)}")
return
logger.info(f'Copying {str(src)} to {str(dest)}')
logger.info(f"Copying {str(src)} to {str(dest)}")
try:
shutil.copy(src, dest)
except Exception as e:
logger.error(f'COPY FAILED: {str(e)}')
logger.error(f"COPY FAILED: {str(e)}")
@staticmethod
def copy_dir(src:Path,dest:Path):
'''
def copy_dir(src: Path, dest: Path):
"""
Recursively copy a directory with logging
'''
"""
if dest.exists():
logger.info(f'Skipping existing {str(dest)}')
logger.info(f"Skipping existing {str(dest)}")
return
logger.info(f'Copying {str(src)} to {str(dest)}')
logger.info(f"Copying {str(src)} to {str(dest)}")
try:
shutil.copytree(src, dest)
except Exception as e:
logger.error(f'COPY FAILED: {str(e)}')
logger.error(f"COPY FAILED: {str(e)}")
def migrate_models(self, src_dir: Path):
'''
"""
Recursively walk through src directory, probe anything
that looks like a model, and copy the model into the
appropriate location within the destination models directory.
'''
"""
directories_scanned = set()
for root, dirs, files in os.walk(src_dir):
for d in dirs:
try:
model = Path(root,d)
model = Path(root, d)
info = ModelProbe().heuristic_probe(model)
if not info:
continue
@@ -136,9 +136,9 @@ class MigrateTo3(object):
# don't copy raw learned_embeds.bin or pytorch_lora_weights.bin
# let them be copied as part of a tree copy operation
try:
if f in {'learned_embeds.bin','pytorch_lora_weights.bin'}:
if f in {"learned_embeds.bin", "pytorch_lora_weights.bin"}:
continue
model = Path(root,f)
model = Path(root, f)
if model.parent in directories_scanned:
continue
info = ModelProbe().heuristic_probe(model)
@@ -154,148 +154,146 @@ class MigrateTo3(object):
logger.error(str(e))
def migrate_support_models(self):
'''
"""
Copy the clipseg, upscaler, and restoration models to their new
locations.
'''
"""
dest_directory = self.dest_models
if (self.root_directory / 'models/clipseg').exists():
self.copy_dir(self.root_directory / 'models/clipseg', dest_directory / 'core/misc/clipseg')
if (self.root_directory / 'models/realesrgan').exists():
self.copy_dir(self.root_directory / 'models/realesrgan', dest_directory / 'core/upscaling/realesrgan')
for d in ['codeformer','gfpgan']:
path = self.root_directory / 'models' / d
if (self.root_directory / "models/clipseg").exists():
self.copy_dir(self.root_directory / "models/clipseg", dest_directory / "core/misc/clipseg")
if (self.root_directory / "models/realesrgan").exists():
self.copy_dir(self.root_directory / "models/realesrgan", dest_directory / "core/upscaling/realesrgan")
for d in ["codeformer", "gfpgan"]:
path = self.root_directory / "models" / d
if path.exists():
self.copy_dir(path,dest_directory / f'core/face_restoration/{d}')
self.copy_dir(path, dest_directory / f"core/face_restoration/{d}")
def migrate_tuning_models(self):
'''
"""
Migrate the embeddings, loras and controlnets directories to their new homes.
'''
"""
for src in [self.src_paths.embeddings, self.src_paths.loras, self.src_paths.controlnets]:
if not src:
continue
if src.is_dir():
logger.info(f'Scanning {src}')
logger.info(f"Scanning {src}")
self.migrate_models(src)
else:
logger.info(f'{src} directory not found; skipping')
logger.info(f"{src} directory not found; skipping")
continue
def migrate_conversion_models(self):
'''
"""
Migrate all the models that are needed by the ckpt_to_diffusers conversion
script.
'''
"""
dest_directory = self.dest_models
kwargs = dict(
cache_dir = self.root_directory / 'models/hub',
#local_files_only = True
cache_dir=self.root_directory / "models/hub",
# local_files_only = True
)
try:
logger.info('Migrating core tokenizers and text encoders')
target_dir = dest_directory / 'core' / 'convert'
logger.info("Migrating core tokenizers and text encoders")
target_dir = dest_directory / "core" / "convert"
self._migrate_pretrained(BertTokenizerFast,
repo_id='bert-base-uncased',
dest = target_dir / 'bert-base-uncased',
**kwargs)
self._migrate_pretrained(
BertTokenizerFast, repo_id="bert-base-uncased", dest=target_dir / "bert-base-uncased", **kwargs
)
# sd-1
repo_id = 'openai/clip-vit-large-patch14'
self._migrate_pretrained(CLIPTokenizer,
repo_id= repo_id,
dest= target_dir / 'clip-vit-large-patch14',
**kwargs)
self._migrate_pretrained(CLIPTextModel,
repo_id = repo_id,
dest = target_dir / 'clip-vit-large-patch14',
force = True,
**kwargs)
repo_id = "openai/clip-vit-large-patch14"
self._migrate_pretrained(
CLIPTokenizer, repo_id=repo_id, dest=target_dir / "clip-vit-large-patch14", **kwargs
)
self._migrate_pretrained(
CLIPTextModel, repo_id=repo_id, dest=target_dir / "clip-vit-large-patch14", force=True, **kwargs
)
# sd-2
repo_id = "stabilityai/stable-diffusion-2"
self._migrate_pretrained(CLIPTokenizer,
repo_id = repo_id,
dest = target_dir / 'stable-diffusion-2-clip' / 'tokenizer',
**{'subfolder':'tokenizer',**kwargs}
)
self._migrate_pretrained(CLIPTextModel,
repo_id = repo_id,
dest = target_dir / 'stable-diffusion-2-clip' / 'text_encoder',
**{'subfolder':'text_encoder',**kwargs}
)
self._migrate_pretrained(
CLIPTokenizer,
repo_id=repo_id,
dest=target_dir / "stable-diffusion-2-clip" / "tokenizer",
**{"subfolder": "tokenizer", **kwargs},
)
self._migrate_pretrained(
CLIPTextModel,
repo_id=repo_id,
dest=target_dir / "stable-diffusion-2-clip" / "text_encoder",
**{"subfolder": "text_encoder", **kwargs},
)
# VAE
logger.info('Migrating stable diffusion VAE')
self._migrate_pretrained(AutoencoderKL,
repo_id = 'stabilityai/sd-vae-ft-mse',
dest = target_dir / 'sd-vae-ft-mse',
**kwargs)
logger.info("Migrating stable diffusion VAE")
self._migrate_pretrained(
AutoencoderKL, repo_id="stabilityai/sd-vae-ft-mse", dest=target_dir / "sd-vae-ft-mse", **kwargs
)
# safety checking
logger.info('Migrating safety checker')
logger.info("Migrating safety checker")
repo_id = "CompVis/stable-diffusion-safety-checker"
self._migrate_pretrained(AutoFeatureExtractor,
repo_id = repo_id,
dest = target_dir / 'stable-diffusion-safety-checker',
**kwargs)
self._migrate_pretrained(StableDiffusionSafetyChecker,
repo_id = repo_id,
dest = target_dir / 'stable-diffusion-safety-checker',
**kwargs)
self._migrate_pretrained(
AutoFeatureExtractor, repo_id=repo_id, dest=target_dir / "stable-diffusion-safety-checker", **kwargs
)
self._migrate_pretrained(
StableDiffusionSafetyChecker,
repo_id=repo_id,
dest=target_dir / "stable-diffusion-safety-checker",
**kwargs,
)
except KeyboardInterrupt:
raise
except Exception as e:
logger.error(str(e))
def _model_probe_to_path(self, info: ModelProbeInfo)->Path:
def _model_probe_to_path(self, info: ModelProbeInfo) -> Path:
return Path(self.dest_models, info.base_type.value, info.model_type.value)
def _migrate_pretrained(self, model_class, repo_id: str, dest: Path, force:bool=False, **kwargs):
def _migrate_pretrained(self, model_class, repo_id: str, dest: Path, force: bool = False, **kwargs):
if dest.exists() and not force:
logger.info(f'Skipping existing {dest}')
logger.info(f"Skipping existing {dest}")
return
model = model_class.from_pretrained(repo_id, **kwargs)
self._save_pretrained(model, dest, overwrite=force)
def _save_pretrained(self, model, dest: Path, overwrite: bool=False):
def _save_pretrained(self, model, dest: Path, overwrite: bool = False):
model_name = dest.name
if overwrite:
model.save_pretrained(dest, safe_serialization=True)
else:
download_path = dest.with_name(f'{model_name}.downloading')
download_path = dest.with_name(f"{model_name}.downloading")
model.save_pretrained(download_path, safe_serialization=True)
download_path.replace(dest)
def _download_vae(self, repo_id: str, subfolder:str=None)->Path:
vae = AutoencoderKL.from_pretrained(repo_id, cache_dir=self.root_directory / 'models/hub', subfolder=subfolder)
def _download_vae(self, repo_id: str, subfolder: str = None) -> Path:
vae = AutoencoderKL.from_pretrained(repo_id, cache_dir=self.root_directory / "models/hub", subfolder=subfolder)
info = ModelProbe().heuristic_probe(vae)
_, model_name = repo_id.split('/')
_, model_name = repo_id.split("/")
dest = self._model_probe_to_path(info) / self.unique_name(model_name, info)
vae.save_pretrained(dest, safe_serialization=True)
return dest
def _vae_path(self, vae: Union[str,dict])->Path:
'''
def _vae_path(self, vae: Union[str, dict]) -> Path:
"""
Convert 2.3 VAE stanza to a straight path.
'''
"""
vae_path = None
# First get a path
if isinstance(vae,str):
if isinstance(vae, str):
vae_path = vae
elif isinstance(vae,DictConfig):
if p := vae.get('path'):
elif isinstance(vae, DictConfig):
if p := vae.get("path"):
vae_path = p
elif repo_id := vae.get('repo_id'):
if repo_id=='stabilityai/sd-vae-ft-mse': # this guy is already downloaded
vae_path = 'models/core/convert/sd-vae-ft-mse'
elif repo_id := vae.get("repo_id"):
if repo_id == "stabilityai/sd-vae-ft-mse": # this guy is already downloaded
vae_path = "models/core/convert/sd-vae-ft-mse"
return vae_path
else:
vae_path = self._download_vae(repo_id, vae.get('subfolder'))
vae_path = self._download_vae(repo_id, vae.get("subfolder"))
assert vae_path is not None, "Couldn't find VAE for this model"
@@ -307,152 +305,144 @@ class MigrateTo3(object):
dest = self._model_probe_to_path(info) / vae_path.name
if not dest.exists():
if vae_path.is_dir():
self.copy_dir(vae_path,dest)
self.copy_dir(vae_path, dest)
else:
self.copy_file(vae_path,dest)
self.copy_file(vae_path, dest)
vae_path = dest
if vae_path.is_relative_to(self.dest_models):
rel_path = vae_path.relative_to(self.dest_models)
return Path('models',rel_path)
return Path("models", rel_path)
else:
return vae_path
def migrate_repo_id(self, repo_id: str, model_name: str=None, **extra_config):
'''
def migrate_repo_id(self, repo_id: str, model_name: str = None, **extra_config):
"""
Migrate a locally-cached diffusers pipeline identified with a repo_id
'''
"""
dest_dir = self.dest_models
cache = self.root_directory / 'models/hub'
cache = self.root_directory / "models/hub"
kwargs = dict(
cache_dir = cache,
safety_checker = None,
cache_dir=cache,
safety_checker=None,
# local_files_only = True,
)
owner,repo_name = repo_id.split('/')
owner, repo_name = repo_id.split("/")
model_name = model_name or repo_name
model = cache / '--'.join(['models',owner,repo_name])
if len(list(model.glob('snapshots/**/model_index.json')))==0:
model = cache / "--".join(["models", owner, repo_name])
if len(list(model.glob("snapshots/**/model_index.json"))) == 0:
return
revisions = [x.name for x in model.glob('refs/*')]
revisions = [x.name for x in model.glob("refs/*")]
# if an fp16 is available we use that
revision = 'fp16' if len(revisions) > 1 and 'fp16' in revisions else revisions[0]
pipeline = StableDiffusionPipeline.from_pretrained(
repo_id,
revision=revision,
**kwargs)
revision = "fp16" if len(revisions) > 1 and "fp16" in revisions else revisions[0]
pipeline = StableDiffusionPipeline.from_pretrained(repo_id, revision=revision, **kwargs)
info = ModelProbe().heuristic_probe(pipeline)
if not info:
return
if self.mgr.model_exists(model_name, info.base_type, info.model_type):
logger.warning(f'A model named {model_name} already exists at the destination. Skipping migration.')
logger.warning(f"A model named {model_name} already exists at the destination. Skipping migration.")
return
dest = self._model_probe_to_path(info) / model_name
self._save_pretrained(pipeline, dest)
rel_path = Path('models',dest.relative_to(dest_dir))
rel_path = Path("models", dest.relative_to(dest_dir))
self._add_model(model_name, info, rel_path, **extra_config)
def migrate_path(self, location: Path, model_name: str=None, **extra_config):
'''
def migrate_path(self, location: Path, model_name: str = None, **extra_config):
"""
Migrate a model referred to using 'weights' or 'path'
'''
"""
# handle relative paths
dest_dir = self.dest_models
location = self.root_directory / location
model_name = model_name or location.stem
info = ModelProbe().heuristic_probe(location)
if not info:
return
if self.mgr.model_exists(model_name, info.base_type, info.model_type):
logger.warning(f'A model named {model_name} already exists at the destination. Skipping migration.')
logger.warning(f"A model named {model_name} already exists at the destination. Skipping migration.")
return
# uh oh, weights is in the old models directory - move it into the new one
if Path(location).is_relative_to(self.src_paths.models):
dest = Path(dest_dir, info.base_type.value, info.model_type.value, location.name)
if location.is_dir():
self.copy_dir(location,dest)
self.copy_dir(location, dest)
else:
self.copy_file(location,dest)
location = Path('models', info.base_type.value, info.model_type.value, location.name)
self.copy_file(location, dest)
location = Path("models", info.base_type.value, info.model_type.value, location.name)
self._add_model(model_name, info, location, **extra_config)
def _add_model(self,
model_name: str,
info: ModelProbeInfo,
location: Path,
**extra_config):
def _add_model(self, model_name: str, info: ModelProbeInfo, location: Path, **extra_config):
if info.model_type != ModelType.Main:
return
self.mgr.add_model(
model_name = model_name,
base_model = info.base_type,
model_type = info.model_type,
clobber = True,
model_attributes = {
'path': str(location),
'description': f'A {info.base_type.value} {info.model_type.value} model',
'model_format': info.format,
'variant': info.variant_type.value,
**extra_config,
}
)
def migrate_defined_models(self):
'''
Migrate models defined in models.yaml
'''
# find any models referred to in old models.yaml
conf = OmegaConf.load(self.root_directory / 'configs/models.yaml')
for model_name, stanza in conf.items():
self.mgr.add_model(
model_name=model_name,
base_model=info.base_type,
model_type=info.model_type,
clobber=True,
model_attributes={
"path": str(location),
"description": f"A {info.base_type.value} {info.model_type.value} model",
"model_format": info.format,
"variant": info.variant_type.value,
**extra_config,
},
)
def migrate_defined_models(self):
"""
Migrate models defined in models.yaml
"""
# find any models referred to in old models.yaml
conf = OmegaConf.load(self.root_directory / "configs/models.yaml")
for model_name, stanza in conf.items():
try:
passthru_args = {}
if vae := stanza.get('vae'):
if vae := stanza.get("vae"):
try:
passthru_args['vae'] = str(self._vae_path(vae))
passthru_args["vae"] = str(self._vae_path(vae))
except Exception as e:
logger.warning(f'Could not find a VAE matching "{vae}" for model "{model_name}"')
logger.warning(str(e))
if config := stanza.get("config"):
passthru_args["config"] = config
if description := stanza.get("description"):
passthru_args["description"] = description
if repo_id := stanza.get("repo_id"):
logger.info(f"Migrating diffusers model {model_name}")
self.migrate_repo_id(repo_id, model_name, **passthru_args)
elif location := stanza.get("weights"):
logger.info(f"Migrating checkpoint model {model_name}")
self.migrate_path(Path(location), model_name, **passthru_args)
elif location := stanza.get("path"):
logger.info(f"Migrating diffusers model {model_name}")
self.migrate_path(Path(location), model_name, **passthru_args)
except KeyboardInterrupt:
raise
except Exception as e:
logger.error(str(e))
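    # Illustrative (hypothetical) 2.3-era models.yaml stanza consumed by the
    # loop above: `vae`, `config` and `description` become passthru_args,
    # while the presence of `repo_id`, `weights` or `path` picks the route.
    #
    #   stable-diffusion-1.5:
    #       description: Stable Diffusion version 1.5
    #       weights: models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt
    #       config: configs/stable-diffusion/v1-inference.yaml
    #       vae: models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt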
def migrate(self):
self.create_directory_structure()
# the configure script is doing this
@@ -461,67 +451,71 @@ class MigrateTo3(object):
self.migrate_tuning_models()
self.migrate_defined_models()
def _parse_legacy_initfile(root: Path, initfile: Path) -> ModelPaths:
"""
Return a ModelPaths tuple (models, embeddings, loras, controlnets) parsed from a legacy invokeai.init file
"""
parser = argparse.ArgumentParser(fromfile_prefix_chars="@")
parser.add_argument(
"--embedding_directory",
"--embedding_path",
type=Path,
dest="embedding_path",
default=Path("embeddings"),
)
parser.add_argument(
"--lora_directory",
dest="lora_path",
type=Path,
default=Path("loras"),
)
opt, _ = parser.parse_known_args([f"@{str(initfile)}"])
return ModelPaths(
models=root / "models",
embeddings=root / str(opt.embedding_path).strip('"'),
loras=root / str(opt.lora_path).strip('"'),
controlnets=root / "controlnets",
)
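# Example (hypothetical) invokeai.init fragment: argparse reads the file via
# the fromfile_prefix_chars="@" mechanism above, one option per line, and
# relative paths are then resolved against the root directory.
#
#   --embedding_directory=embeddings
#   --lora_directory=loras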
def _parse_legacy_yamlfile(root: Path, initfile: Path) -> ModelPaths:
"""
Return a ModelPaths tuple (models, embeddings, loras, controlnets) read from a legacy invokeai.yaml file
"""
# Don't use the config object because it is unforgiving of version updates
# Just use omegaconf directly
opt = OmegaConf.load(initfile)
paths = opt.InvokeAI.Paths
models = paths.get("models_dir", "models")
embeddings = paths.get("embedding_dir", "embeddings")
loras = paths.get("lora_dir", "loras")
controlnets = paths.get("controlnet_dir", "controlnets")
return ModelPaths(
models=root / models,
embeddings=root / embeddings,
loras=root / loras,
controlnets=root / controlnets,
)
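# Example (hypothetical) invokeai.yaml fragment read via OmegaConf above;
# any missing key falls back to the default in the paths.get() calls.
#
#   InvokeAI:
#     Paths:
#       models_dir: models
#       embedding_dir: embeddings
#       lora_dir: loras
#       controlnet_dir: controlnets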
def get_legacy_embeddings(root: Path) -> ModelPaths:
path = root / "invokeai.init"
if path.exists():
return _parse_legacy_initfile(root, path)
path = root / "invokeai.yaml"
if path.exists():
return _parse_legacy_yamlfile(root, path)
def do_migrate(src_directory: Path, dest_directory: Path):
"""
Migrate models from src to dest InvokeAI root directories
"""
config_file = dest_directory / "configs" / "models.yaml.3"
dest_models = dest_directory / "models.3"
version_3 = (dest_directory / "models" / "core").exists()
# Here we create the destination models.yaml file.
# If we are writing into a version 3 directory and the
@@ -530,80 +524,80 @@ def do_migrate(src_directory: Path, dest_directory: Path):
# create a new empty one.
if version_3: # write into the dest directory
try:
shutil.copy(dest_directory / "configs" / "models.yaml", config_file)
except:
MigrateTo3.initialize_yaml(config_file)
mgr = ModelManager(config_file) # important to initialize BEFORE moving the models directory
(dest_directory / "models").replace(dest_models)
else:
MigrateTo3.initialize_yaml(config_file)
mgr = ModelManager(config_file)
paths = get_legacy_embeddings(src_directory)
migrator = MigrateTo3(from_root=src_directory, to_models=dest_models, model_manager=mgr, src_paths=paths)
migrator.migrate()
print("Migration successful.")
if not version_3:
(dest_directory / "models").replace(src_directory / "models.orig")
print(f"Original models directory moved to {dest_directory}/models.orig")
(dest_directory / "configs" / "models.yaml").replace(src_directory / "configs" / "models.yaml.orig")
print(f"Original models.yaml file moved to {dest_directory}/configs/models.yaml.orig")
config_file.replace(config_file.with_suffix(""))
dest_models.replace(dest_models.with_suffix(""))
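# Minimal usage sketch (hypothetical roots). The ".3"-suffixed names above
# keep the migration safe to interrupt: the new models.yaml and models
# directory are only renamed into place once migrate() completes.
#
#   do_migrate(Path("/opt/invokeai-2.3"), Path("/opt/invokeai-3.0"))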
def main():
parser = argparse.ArgumentParser(prog="invokeai-migrate3",
description="""
parser = argparse.ArgumentParser(
prog="invokeai-migrate3",
description="""
This will copy and convert the models directory and the configs/models.yaml from the InvokeAI 2.3 format
'--from-directory' root to the InvokeAI 3.0 '--to-directory' root. These may be abbreviated '--from' and '--to'.
The old models directory and config file will be renamed 'models.orig' and 'models.yaml.orig' respectively.
It is safe to provide the same directory for both arguments, but it is better to use the invokeai_configure
script, which will perform a full upgrade in place."""
)
parser.add_argument('--from-directory',
dest='src_root',
type=Path,
required=True,
help='Source InvokeAI 2.3 root directory (containing "invokeai.init" or "invokeai.yaml")'
)
parser.add_argument('--to-directory',
dest='dest_root',
type=Path,
required=True,
help='Destination InvokeAI 3.0 directory (containing "invokeai.yaml")'
)
script, which will perform a full upgrade in place.""",
)
parser.add_argument(
"--from-directory",
dest="src_root",
type=Path,
required=True,
help='Source InvokeAI 2.3 root directory (containing "invokeai.init" or "invokeai.yaml")',
)
parser.add_argument(
"--to-directory",
dest="dest_root",
type=Path,
required=True,
help='Destination InvokeAI 3.0 directory (containing "invokeai.yaml")',
)
args = parser.parse_args()
src_root = args.src_root
assert src_root.is_dir(), f"{src_root} is not a valid directory"
assert (src_root / "models").is_dir(), f"{src_root} does not contain a 'models' subdirectory"
assert (src_root / "models" / "hub").exists(), f"{src_root} does not contain a version 2.3 models directory"
assert (src_root / "invokeai.init").exists() or (
src_root / "invokeai.yaml"
).exists(), f"{src_root} does not contain an InvokeAI init file."
dest_root = args.dest_root
assert dest_root.is_dir(), f"{dest_root} is not a valid directory"
config = InvokeAIAppConfig.get_config()
config.parse_args(["--root", str(dest_root)])
# TODO: revisit - don't rely on invokeai.yaml to exist yet!
dest_is_setup = (dest_root / "models/core").exists() and (dest_root / "databases").exists()
if not dest_is_setup:
import invokeai.frontend.install.invokeai_configure
from invokeai.backend.install.invokeai_configure import initialize_rootdir
initialize_rootdir(dest_root, True)
do_migrate(src_root, dest_root)
if __name__ == "__main__":
main()

View File

@@ -4,7 +4,7 @@ Utility (backend) functions used by model_install.py
import os
import shutil
import warnings
from dataclasses import dataclass, field
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Dict, Callable, Union, Set
@@ -28,7 +28,7 @@ warnings.filterwarnings("ignore")
# --------------------------globals-----------------------
config = InvokeAIAppConfig.get_config()
logger = InvokeAILogger.getLogger(name="InvokeAI")
# the initial "configs" dir is now bundled in the `invokeai.configs` package
Dataset_path = Path(configs.__path__[0]) / "INITIAL_MODELS.yaml"
@@ -45,59 +45,63 @@ Config_preamble = """
LEGACY_CONFIGS = {
BaseModelType.StableDiffusion1: {
ModelVariantType.Normal: "v1-inference.yaml",
ModelVariantType.Inpaint: "v1-inpainting-inference.yaml",
},
BaseModelType.StableDiffusion2: {
ModelVariantType.Normal: {
SchedulerPredictionType.Epsilon: "v2-inference.yaml",
SchedulerPredictionType.VPrediction: "v2-inference-v.yaml",
},
ModelVariantType.Inpaint: {
SchedulerPredictionType.Epsilon: "v2-inpainting-inference.yaml",
SchedulerPredictionType.VPrediction: "v2-inpainting-inference-v.yaml",
},
},
BaseModelType.StableDiffusionXL: {
ModelVariantType.Normal: "sd_xl_base.yaml",
},
BaseModelType.StableDiffusionXLRefiner: {
ModelVariantType.Normal: "sd_xl_refiner.yaml",
},
}
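# Lookup sketch: SD-1 and SD-XL entries resolve in two steps, while SD-2
# needs the scheduler prediction type as a third key (see _make_attributes
# below), e.g.
#
#   LEGACY_CONFIGS[BaseModelType.StableDiffusion2][ModelVariantType.Normal][
#       SchedulerPredictionType.VPrediction
#   ]  # -> "v2-inference-v.yaml"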
@dataclass
class ModelInstallList:
"""Class for listing models to be installed/removed"""
install_models: List[str] = field(default_factory=list)
remove_models: List[str] = field(default_factory=list)
@dataclass
class InstallSelections:
install_models: List[str] = field(default_factory=list)
remove_models: List[str] = field(default_factory=list)
@dataclass
class ModelLoadInfo:
name: str
model_type: ModelType
base_type: BaseModelType
path: Path = None
repo_id: str = None
description: str = ""
installed: bool = False
recommended: bool = False
default: bool = False
class ModelInstall(object):
def __init__(
self,
config: InvokeAIAppConfig,
prediction_type_helper: Callable[[Path], SchedulerPredictionType] = None,
model_manager: ModelManager = None,
access_token: str = None,
):
self.config = config
self.mgr = model_manager or ModelManager(config.model_conf_path)
self.datasets = OmegaConf.load(Dataset_path)
@@ -105,66 +109,66 @@ class ModelInstall(object):
self.access_token = access_token or HfFolder.get_token()
self.reverse_paths = self._reverse_paths(self.datasets)
def all_models(self) -> Dict[str, ModelLoadInfo]:
"""
Return dict of model_key=>ModelLoadInfo objects.
This method consolidates and simplifies the entries in both
models.yaml and INITIAL_MODELS.yaml so that they can
be treated uniformly. It also sorts the models alphabetically
by their name, to improve the display somewhat.
"""
model_dict = dict()
# first populate with the entries in INITIAL_MODELS.yaml
for key, value in self.datasets.items():
name, base, model_type = ModelManager.parse_key(key)
value["name"] = name
value["base_type"] = base
value["model_type"] = model_type
model_dict[key] = ModelLoadInfo(**value)
# supplement with entries in models.yaml
installed_models = self.mgr.list_models()
for md in installed_models:
base = md["base_model"]
model_type = md["model_type"]
name = md["model_name"]
key = ModelManager.create_key(name, base, model_type)
if key in model_dict:
model_dict[key].installed = True
else:
model_dict[key] = ModelLoadInfo(
name=name,
base_type=base,
model_type=model_type,
path=value.get("path"),
installed=True,
)
return {x: model_dict[x] for x in sorted(model_dict.keys(), key=lambda y: model_dict[y].name.lower())}
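    # Usage sketch, assuming `installer` is a ModelInstall instance: iterate
    # the consolidated catalog and mark entries that are already installed.
    #
    #   for key, info in installer.all_models().items():
    #       print(f"{'*' if info.installed else ' '} {key} ({info.description})")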
def list_models(self, model_type):
installed = self.mgr.list_models(model_type=model_type)
print(f"Installed models of type `{model_type}`:")
for i in installed:
print(f"{i['model_name']}\t{i['base_model']}\t{i['path']}")
# logic here a little reversed to maintain backward compatibility
def starter_models(self, all_models: bool = False) -> Set[str]:
models = set()
for key, value in self.datasets.items():
name, base, model_type = ModelManager.parse_key(key)
if all_models or model_type in [ModelType.Main, ModelType.Vae]:
models.add(key)
return models
def recommended_models(self) -> Set[str]:
starters = self.starter_models(all_models=True)
return set([x for x in starters if self.datasets[x].get("recommended", False)])
def default_model(self) -> str:
starters = self.starter_models()
defaults = [x for x in starters if self.datasets[x].get("default", False)]
return defaults[0]
def install(self, selections: InstallSelections):
@@ -173,54 +177,57 @@ class ModelInstall(object):
job = 1
jobs = len(selections.remove_models) + len(selections.install_models)
# remove requested models
for key in selections.remove_models:
name, base, mtype = self.mgr.parse_key(key)
logger.info(f"Deleting {mtype} model {name} [{job}/{jobs}]")
try:
self.mgr.del_model(name, base, mtype)
except FileNotFoundError as e:
logger.warning(e)
job += 1
# add requested models
for path in selections.install_models:
logger.info(f"Installing {path} [{job}/{jobs}]")
try:
self.heuristic_import(path)
except (ValueError, KeyError) as e:
logger.error(str(e))
job += 1
dlogging.set_verbosity(verbosity)
self.mgr.commit()
def heuristic_import(
self,
model_path_id_or_url: Union[str, Path],
models_installed: Set[Path] = None,
) -> Dict[str, AddModelResult]:
"""
:param model_path_id_or_url: A Path to a local model to import, or a string representing its repo_id or URL
:param models_installed: Set of installed models, used for recursive invocation
Returns a set of dict objects corresponding to newly-created stanzas in models.yaml.
"""
if not models_installed:
models_installed = dict()
# A little hack to allow nested routines to retrieve info on the requested ID
self.current_id = model_path_id_or_url
path = Path(model_path_id_or_url)
# checkpoint file, or similar
if path.is_file():
models_installed.update({str(path): self._install_path(path)})
# folders style or similar
elif path.is_dir() and any(
[
(path / x).exists()
for x in {"config.json", "model_index.json", "learned_embeds.bin", "pytorch_lora_weights.bin"}
]
):
models_installed.update({str(model_path_id_or_url): self._install_path(path)})
# recursive scan
@@ -229,7 +236,7 @@ class ModelInstall(object):
self.heuristic_import(child, models_installed=models_installed)
# huggingface repo
elif len(str(model_path_id_or_url).split("/")) == 2:
models_installed.update({str(model_path_id_or_url): self._install_repo(str(model_path_id_or_url))})
# a URL
@@ -237,42 +244,43 @@ class ModelInstall(object):
models_installed.update({str(model_path_id_or_url): self._install_url(model_path_id_or_url)})
else:
raise KeyError(f"{str(model_path_id_or_url)} is not recognized as a local path, repo ID or URL. Skipping")
return models_installed
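    # Usage sketch (hypothetical sources): all three forms route through this
    # single entry point, which dispatches on file / directory / repo_id / URL.
    #
    #   installer.heuristic_import("/downloads/model.safetensors")         # local file
    #   installer.heuristic_import("runwayml/stable-diffusion-v1-5")       # HF repo_id
    #   installer.heuristic_import("https://example.com/some-model.ckpt")  # URL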
# install a model from a local path. The optional info parameter is there to prevent
# the model from being probed twice in the event that it has already been probed.
def _install_path(self, path: Path, info: ModelProbeInfo = None) -> AddModelResult:
info = info or ModelProbe().heuristic_probe(path, self.prediction_helper)
if not info:
logger.warning(f"Unable to parse format of {path}")
return None
model_name = path.stem if path.is_file() else path.name
if self.mgr.model_exists(model_name, info.base_type, info.model_type):
raise ValueError(f'A model named "{model_name}" is already installed.')
attributes = self._make_attributes(path, info)
return self.mgr.add_model(
model_name=model_name,
base_model=info.base_type,
model_type=info.model_type,
model_attributes=attributes,
)
def _install_url(self, url: str) -> AddModelResult:
with TemporaryDirectory(dir=self.config.models_path) as staging:
location = download_with_resume(url, Path(staging))
if not location:
logger.error(f"Unable to download {url}. Skipping.")
info = ModelProbe().heuristic_probe(location)
dest = self.config.models_path / info.base_type.value / info.model_type.value / location.name
models_path = shutil.move(location, dest)
# staged version will be garbage-collected at this time
return self._install_path(Path(models_path), info)
def _install_repo(self, repo_id: str) -> AddModelResult:
hinfo = HfApi().model_info(repo_id)
# we try to figure out how to download this most economically
# list all the files in the repo
files = [x.rfilename for x in hinfo.siblings]
@@ -280,42 +288,49 @@ class ModelInstall(object):
with TemporaryDirectory(dir=self.config.models_path) as staging:
staging = Path(staging)
if "model_index.json" in files:
location = self._download_hf_pipeline(repo_id, staging) # pipeline
else:
for suffix in ["safetensors", "bin"]:
if f"pytorch_lora_weights.{suffix}" in files:
location = self._download_hf_model(repo_id, ["pytorch_lora_weights.bin"], staging) # LoRA
break
elif (
self.config.precision == "float16" and f"diffusion_pytorch_model.fp16.{suffix}" in files
): # vae, controlnet or some other standalone
files = ["config.json", f"diffusion_pytorch_model.fp16.{suffix}"]
location = self._download_hf_model(repo_id, files, staging)
break
elif f"diffusion_pytorch_model.{suffix}" in files:
files = ["config.json", f"diffusion_pytorch_model.{suffix}"]
location = self._download_hf_model(repo_id, files, staging)
break
elif f"learned_embeds.{suffix}" in files:
location = self._download_hf_model(repo_id, [f"learned_embeds.{suffix}"], staging)
break
if not location:
logger.warning(f"Could not determine type of repo {repo_id}. Skipping install.")
return {}
info = ModelProbe().heuristic_probe(location, self.prediction_helper)
if not info:
logger.warning(f"Could not probe {location}. Skipping install.")
return {}
dest = (
self.config.models_path
/ info.base_type.value
/ info.model_type.value
/ self._get_model_name(repo_id, location)
)
if dest.exists():
shutil.rmtree(dest)
shutil.copytree(location, dest)
return self._install_path(dest, info)
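    # Download economy in the branch above: a repo containing model_index.json
    # is fetched as a whole pipeline; otherwise only the minimal file set for
    # the detected type is pulled (LoRA weights, config plus standalone
    # weights, or a textual-inversion embedding), trying "safetensors" before
    # "bin" and preferring fp16 weights when precision is float16.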
def _get_model_name(self, path_name: str, location: Path) -> str:
"""
Calculate a name for the model - primitive implementation.
"""
if key := self.reverse_paths.get(path_name):
(name, base, mtype) = ModelManager.parse_key(key)
return name
@@ -324,99 +339,103 @@ class ModelInstall(object):
else:
return location.stem
def _make_attributes(self, path: Path, info: ModelProbeInfo) -> dict:
model_name = path.name if path.is_dir() else path.stem
description = f"{info.base_type.value} {info.model_type.value} model {model_name}"
if key := self.reverse_paths.get(self.current_id):
if key in self.datasets:
description = self.datasets[key].get("description") or description
rel_path = self.relative_to_root(path)
attributes = dict(
path=str(rel_path),
description=str(description),
model_format=info.format,
)
legacy_conf = None
if info.model_type == ModelType.Main:
attributes.update(
dict(
variant=info.variant_type,
)
)
if info.format == "checkpoint":
try:
possible_conf = path.with_suffix(".yaml")
if possible_conf.exists():
legacy_conf = str(self.relative_to_root(possible_conf))
elif info.base_type == BaseModelType.StableDiffusion2:
legacy_conf = Path(
self.config.legacy_conf_dir,
LEGACY_CONFIGS[info.base_type][info.variant_type][info.prediction_type],
)
else:
legacy_conf = Path(
self.config.legacy_conf_dir, LEGACY_CONFIGS[info.base_type][info.variant_type]
)
except KeyError:
legacy_conf = Path(self.config.legacy_conf_dir, "v1-inference.yaml") # best guess
if info.model_type == ModelType.ControlNet and info.format == "checkpoint":
possible_conf = path.with_suffix(".yaml")
if possible_conf.exists():
legacy_conf = str(self.relative_to_root(possible_conf))
if legacy_conf:
attributes.update(dict(config=str(legacy_conf)))
return attributes
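    # Illustrative (hypothetical) return value for an SD-1 inpainting
    # checkpoint; "config" only appears when a legacy .yaml config applies:
    #
    #   {
    #       "path": "models/sd-1/main/my-model.ckpt",
    #       "description": "sd-1 main model my-model",
    #       "model_format": "checkpoint",
    #       "variant": "inpaint",
    #       "config": "configs/stable-diffusion/v1-inpainting-inference.yaml",
    #   }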
def relative_to_root(self, path: Path) -> Path:
root = self.config.root_path
if path.is_relative_to(root):
return path.relative_to(root)
else:
return path
def _download_hf_pipeline(self, repo_id: str, staging: Path) -> Path:
"""
This retrieves a StableDiffusion model from cache or remote and then
does a save_pretrained() to the indicated staging area.
"""
_, name = repo_id.split("/")
revisions = ["fp16", "main"] if self.config.precision == "float16" else ["main"]
model = None
for revision in revisions:
try:
model = DiffusionPipeline.from_pretrained(repo_id, revision=revision, safety_checker=None)
except: # most errors are due to fp16 not being present. Fix this to catch other errors
pass
if model:
break
if not model:
logger.error(f"Diffusers model {repo_id} could not be downloaded. Skipping.")
return None
model.save_pretrained(staging / name, safe_serialization=True)
return staging / name
def _download_hf_model(self, repo_id: str, files: List[str], staging: Path) -> Path:
_, name = repo_id.split("/")
location = staging / name
paths = list()
for filename in files:
p = hf_download_with_resume(
repo_id, model_dir=location, model_name=filename, access_token=self.access_token
)
if p:
paths.append(p)
else:
logger.warning(f"Could not download {filename} from {repo_id}.")
return location if len(paths) > 0 else None
@classmethod
def _reverse_paths(cls, datasets) -> dict:
"""
Reverse mapping from repo_id/path to destination name.
"""
return {v.get("path") or v.get("repo_id"): k for k, v in datasets.items()}
# -------------------------------------
def yes_or_no(prompt: str, default_yes=True):
@@ -427,13 +446,12 @@ def yes_or_no(prompt: str, default_yes=True):
else:
return response[0] in ("y", "Y")
# ---------------------------------------------
def hf_download_from_pretrained(model_class: object, model_name: str, destination: Path, **kwargs):
logger = InvokeAILogger.getLogger("InvokeAI")
logger.addFilter(lambda x: "fp16 is not a valid" not in x.getMessage())
model = model_class.from_pretrained(
model_name,
resume_download=True,
@@ -442,13 +460,14 @@ def hf_download_from_pretrained(
model.save_pretrained(destination, safe_serialization=True)
return destination
# ---------------------------------------------
def hf_download_with_resume(
repo_id: str,
model_dir: str,
model_name: str,
model_dest: Path = None,
access_token: str = None,
) -> Path:
model_dest = model_dest or Path(os.path.join(model_dir, model_name))
os.makedirs(model_dir, exist_ok=True)
@@ -467,9 +486,7 @@ def hf_download_with_resume(
resp = requests.get(url, headers=header, stream=True)
total = int(resp.headers.get("content-length", 0))
if resp.status_code == 416: # "range not satisfiable", which means nothing to return
logger.info(f"{model_name}: complete file found. Skipping.")
return model_dest
elif resp.status_code == 404:
@@ -498,5 +515,3 @@ def hf_download_with_resume(
logger.error(f"An error occurred while downloading {model_name}: {str(e)}")
return None
return model_dest
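# Usage sketch (hypothetical repo and file name): an existing partial file
# triggers an HTTP Range request, so an interrupted download resumes where it
# left off; a 416 ("range not satisfiable") response means the destination is
# already complete and is returned as-is.
#
#   path = hf_download_with_resume(
#       repo_id="stabilityai/sd-vae-ft-mse",
#       model_dir="/tmp/models",
#       model_name="diffusion_pytorch_model.safetensors",
#   )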