Implemented the following fixes:

Enhancements:
1. Directory-based imports no longer attempt to import the component files of diffusers
   models (see the sketch after this list).
2. Diffusers directory imports are now supported.
3. Files that end in .ckpt but are not Stable Diffusion models (such as standalone VAEs)
   are skipped during import.
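
A minimal sketch of the scan filter these items describe, distilled from the `heuristic_import` changes in the diff below (the `should_import` helper itself is hypothetical; the skipped stems come straight from the diff):

from pathlib import Path

# Stems used by files living inside a diffusers model folder; importing
# them individually would only duplicate pieces of the parent model.
DIFFUSERS_COMPONENT_STEMS = {"model", "diffusion_pytorch_model"}

def should_import(path: Path) -> bool:
    # Illustrative filter applied to each file found during a directory scan.
    if path.suffix not in (".ckpt", ".safetensors"):
        return False  # not a model checkpoint at all
    if path.stem in DIFFUSERS_COMPONENT_STEMS:
        return False  # component of a diffusers model; skip it
    return True  # candidate for import; its actual format is inspected later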

Bugs identified in Psychedelicious's review:
1. The invokeai-configure form now tracks the current contents of `invokeai.init` correctly.
2. The autoencoders are no longer treated as installable models; they are now mandatory
   support models and will no longer appear in `models.yaml` (a condensed sketch follows
   this list).
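
A condensed sketch of how the diffusers-format autoencoder is now fetched as a support model; it mirrors the `download_vaes()` helper added in the diff below (the function name here is illustrative):

import torch
from diffusers import AutoencoderKL

def install_standard_vae(precision: str, cache_dir: str) -> AutoencoderKL:
    # Downloads (or reuses the cached copy of) the stabilityai VAE.
    # It is installed as a support model, not registered in models.yaml.
    kwargs = dict(cache_dir=cache_dir)
    if precision == "float16":
        kwargs.update(torch_dtype=torch.float16, revision="fp16")
    return AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", **kwargs)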

Bugs identified in Damian's review:
1. If invokeai-model-install is started before the root directory is initialized, it now
   calls invokeai-configure to fix the matter.
2. Fixed a bug that caused an empty `models.yaml` under certain conditions.
3. Made the import textbox smaller.
4. The "convert to diffusers" option is now hidden when there is nothing to import
   (a minimal sketch follows this list).
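
The show/hide behavior in item 4 relies on npyscreen's `hidden` widget attribute plus a `when_value_edited` callback, as in the diff below. A minimal standalone sketch (the form and widget layout are hypothetical):

import npyscreen

class ImportForm(npyscreen.FormBaseNew):
    def create(self):
        self.import_model_paths = self.add(npyscreen.MultiLineEdit, max_height=5)
        self.convert_models = self.add(
            npyscreen.TitleSelectOne,
            name="== CONVERT IMPORTED MODELS INTO DIFFUSERS ==",
            values=["Keep original format", "Convert to diffusers"],
            hidden=True,  # revealed only once there is something to import
        )
        self.import_model_paths.when_value_edited = self._show_hide_convert

    def _show_hide_convert(self):
        paths = self.import_model_paths.value or ""
        self.convert_models.hidden = len(paths) == 0
        self.display()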
Lincoln Stein 2023-02-21 11:47:41 -05:00
parent 55dce6cfdd
commit 3795b40f63
7 changed files with 105 additions and 62 deletions

View File

@@ -56,33 +56,3 @@ trinart-2.0:
vae:
repo_id: stabilityai/sd-vae-ft-mse
recommended: False
trinart-characters-2_0:
description: An SD model finetuned with 19.2M anime/manga style images (ckpt version) (4.27 GB)
repo_id: naclbit/trinart_derrida_characters_v2_stable_diffusion
config: v1-inference.yaml
file: derrida_final.ckpt
format: ckpt
vae:
repo_id: naclbit/trinart_derrida_characters_v2_stable_diffusion
file: autoencoder_fix_kl-f8-trinart_characters.ckpt
width: 512
height: 512
recommended: False
autoencoder-840000:
description: StabilityAI improved autoencoder fine-tuned for human faces. Improves legacy .ckpt models (335 MB)
repo_id: stabilityai/sd-vae-ft-mse-original
format: ckpt
config: VAE/default
file: vae-ft-mse-840000-ema-pruned.ckpt
width: 512
height: 512
recommended: True
trinart_vae:
description: Custom autoencoder for trinart_characters for legacy .ckpt models only (335 MB)
repo_id: naclbit/trinart_characters_19.2m_stable_diffusion_v1
config: VAE/trinart
format: ckpt
file: autoencoder_fix_kl-f8-trinart_characters.ckpt
width: 512
height: 512
recommended: False

View File

@@ -658,7 +658,7 @@ def import_model(model_path: str, gen, opt, completer, convert=False) -> str:
)
if not imported_name:
print("** model failed to load. Aborting")
print("** Import failed or was skipped")
return
if not _verify_load(imported_name, gen):

View File

@@ -560,8 +560,8 @@ class Args(object):
'--outdir',
'-o',
type=str,
help='Directory to save generated images and a log of prompts and seeds. Default: outputs/img-samples',
default='outputs/img-samples',
help='Directory to save generated images and a log of prompts and seeds. Default: ROOTDIR/outputs',
default='outputs',
)
file_group.add_argument(
'--prompt_as_dir',

View File

@@ -839,7 +839,7 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
verbosity = dlogging.get_verbosity()
dlogging.set_verbosity_error()
checkpoint = load_file(checkpoint_path,device='cpu') if Path(checkpoint_path).suffix == '.safetensors' else torch.load(checkpoint_path,device='cpu')
checkpoint = load_file(checkpoint_path) if Path(checkpoint_path).suffix == '.safetensors' else torch.load(checkpoint_path)
cache_dir = global_cache_dir('hub')
pipeline_class = StableDiffusionGeneratorPipeline if return_generator_pipeline else StableDiffusionPipeline

View File

@@ -21,7 +21,9 @@ from pathlib import Path
from urllib import request
import npyscreen
import torch
import transformers
from diffusers import AutoencoderKL
from huggingface_hub import HfFolder
from huggingface_hub import login as hf_hub_login
from omegaconf import OmegaConf
@@ -36,13 +38,14 @@ from transformers import (
import invokeai.configs as configs
from ..args import PRECISION_CHOICES, Args
from ..globals import Globals, global_config_dir
from ..globals import Globals, global_config_dir, global_config_file, global_cache_dir
from ..readline import generic_completer
from .model_install import addModelsForm, process_and_execute
from .model_install_backend import (
default_dataset,
download_from_hf,
recommended_datasets,
hf_download_with_resume,
)
from .widgets import IntTitleSlider
@@ -170,10 +173,9 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
else:
print("...exists", file=sys.stderr)
except Exception:
print("...download failed")
print(f"Error downloading {label} model")
print(traceback.format_exc())
print("...download failed", file=sys.stderr)
print(f"Error downloading {label} model", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
# ---------------------------------------------
# this will preload the Bert tokenizer files
@@ -282,6 +284,36 @@ def download_safety_checker():
download_from_hf(StableDiffusionSafetyChecker, safety_model_id)
print("...success", file=sys.stderr)
# -------------------------------------
def download_vaes(precision: str):
print("Installing stabilityai VAE...", file=sys.stderr)
try:
# first the diffusers version
repo_id = 'stabilityai/sd-vae-ft-mse'
args = dict(
cache_dir=global_cache_dir('diffusers'),
)
if precision=='float16':
args.update(
torch_dtype=torch.float16,
revision='fp16'
)
if not AutoencoderKL.from_pretrained(repo_id, **args):
raise Exception(f'download of {repo_id} failed')
repo_id = 'stabilityai/sd-vae-ft-mse-original'
model_name = 'vae-ft-mse-840000-ema-pruned.ckpt'
# next the legacy checkpoint version
if not hf_download_with_resume(
repo_id = repo_id,
model_name = model_name,
model_dir = str(Globals.root / Model_dir / Weights_dir)
):
raise Exception(f'download of {model_name} failed')
print("...downloaded successfully", file=sys.stderr)
except Exception as e:
print(f"Error downloading StabilityAI standard VAE: {str(e)}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
# -------------------------------------
def get_root(root: str = None) -> str:
@@ -297,6 +329,8 @@ class editOptsForm(npyscreen.FormMultiPage):
def create(self):
program_opts = self.parentApp.program_opts
old_opts = self.parentApp.invokeai_opts
with open('log.txt','w') as f:
f.write(str(old_opts))
first_time = not (Globals.root / Globals.initfile).exists()
access_token = HfFolder.get_token()
@@ -585,10 +619,13 @@ def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Nam
return editApp.new_opts()
def default_startup_options() -> Namespace:
def default_startup_options(init_file: Path) -> Namespace:
opts = Args().parse_args([])
opts.outdir = str(default_output_dir())
opts.safety_checker = True
outdir = Path(opts.outdir)
if not outdir.is_absolute():
opts.outdir = str(Globals.root / opts.outdir)
if not init_file.exists():
opts.safety_checker = True
return opts
@@ -627,9 +664,9 @@ def initialize_rootdir(root: str, yes_to_all: bool = False):
# -------------------------------------
def run_console_ui(program_opts: Namespace) -> (Namespace, Namespace):
def run_console_ui(program_opts: Namespace, initfile: Path=None) -> (Namespace, Namespace):
# parse_args() will read from init file if present
invokeai_opts = default_startup_options()
invokeai_opts = default_startup_options(initfile)
editApp = EditOptApplication(program_opts, invokeai_opts)
editApp.run()
if editApp.user_cancelled:
@@ -766,13 +803,13 @@ def main():
# We check to see if the runtime directory is correctly initialized.
init_file = Path(Globals.root, Globals.initfile)
if not init_file.exists():
if not init_file.exists() or not global_config_file().exists():
initialize_rootdir(Globals.root, opt.yes_to_all)
if opt.yes_to_all:
write_default_options(opt, init_file)
else:
init_options, models_to_download = run_console_ui(opt)
init_options, models_to_download = run_console_ui(opt, init_file)
if init_options:
write_opts(init_options, init_file)
else:
@@ -792,6 +829,7 @@ def main():
download_codeformer()
download_clipseg()
download_safety_checker()
download_vaes(init_options.precision)
if opt.skip_sd_weights:
print("\n** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **")

View File

@@ -19,13 +19,12 @@ from typing import List
import npyscreen
import torch
from datetime import datetime
from pathlib import Path
from npyscreen import widget
from omegaconf import OmegaConf
from ..devices import choose_precision, choose_torch_device
from ..globals import Globals
from ..globals import Globals, global_config_dir
from .widgets import MultiSelectColumns, TextBox
from .model_install_backend import (Dataset_path, default_config_file,
install_requested_models,
@@ -109,7 +108,7 @@ class addModelsForm(npyscreen.FormMultiPage):
self.nextrely -= 1
# if user has already installed some initial models, then don't patronize them
# by showing more recommendations
show_recommended = self.installed_models is None or len(self.installed_models)==0
show_recommended = not self.existing_models
self.models_selected = self.add_widget_intelligent(
npyscreen.MultiSelect,
name="Install Starter Models",
@@ -137,7 +136,7 @@ class addModelsForm(npyscreen.FormMultiPage):
self.nextrely -= 1
self.import_model_paths = self.add_widget_intelligent(
TextBox,
max_height=8,
max_height=5,
scroll_exit=True,
editable=True,
relx=4
@@ -165,6 +164,7 @@ class addModelsForm(npyscreen.FormMultiPage):
relx = 4,
scroll_exit=True,
)
self.nextrely += 1
self.convert_models = self.add_widget_intelligent(
npyscreen.TitleSelectOne,
name='== CONVERT IMPORTED MODELS INTO DIFFUSERS==',
@@ -172,6 +172,7 @@ class addModelsForm(npyscreen.FormMultiPage):
value=0,
begin_entry_at=4,
max_height=4,
hidden=True, # will appear when imported models box is edited
scroll_exit=True,
)
self.cancel = self.add_widget_intelligent(
@@ -205,9 +206,22 @@ class addModelsForm(npyscreen.FormMultiPage):
for i in [self.autoload_directory,self.autoscan_on_startup]:
self.show_directory_fields.addVisibleWhenSelected(i)
self.show_directory_fields.when_value_edited = self._clear_scan_directory
self.import_model_paths.when_value_edited = self._show_hide_convert
self.autoload_directory.when_value_edited = self._show_hide_convert
def resize(self):
super().resize()
self.models_selected.values = self._get_starter_model_labels()
def _clear_scan_directory(self):
if not self.show_directory_fields.value:
self.autoload_directory.value = ''
def _show_hide_convert(self):
model_paths = self.import_model_paths.value or ''
autoload_directory = self.autoload_directory.value or ''
self.convert_models.hidden = len(model_paths)==0 and len(autoload_directory)==0
def _get_starter_model_labels(self)->List[str]:
window_height, window_width = curses.initscr().getmaxyx()
@@ -394,6 +408,12 @@ def main():
# setting a global here
Globals.root = os.path.expanduser(get_root(opt.root) or "")
if not global_config_dir().exists():
print('>> Your InvokeAI root directory is not set up. Calling invokeai-configure.')
import ldm.invoke.config.invokeai_configure
ldm.invoke.config.invokeai_configure.main()
sys.exit(0)
try:
select_and_download_models(opt)
except AssertionError as e:

View File

@@ -136,6 +136,7 @@ class ModelManager(object):
for model_name in self.config:
if self.config[model_name].get("default"):
return model_name
return list(self.config.keys())[0] # first one
def set_default_model(self, model_name: str) -> None:
"""
@@ -633,18 +634,21 @@ class ModelManager(object):
models.yaml file.
"""
model_name = model_name or Path(repo_or_path).stem
description = description or f"imported diffusers model {model_name}"
description = model_description or f"imported diffusers model {model_name}"
new_config = dict(
description=description,
description=model_description,
vae=vae,
format="diffusers",
)
print(f'DEBUG: here i am 1')
if isinstance(repo_or_path, Path) and repo_or_path.exists():
new_config.update(path=str(repo_or_path))
else:
new_config.update(repo_id=repo_or_path)
print(f'DEBUG: here i am 2')
self.add_model(model_name, new_config, True)
print(f'DEBUG: config = {self.config}')
if commit_to_conf:
self.commit(commit_to_conf)
return model_name
@@ -778,8 +782,12 @@ class ModelManager(object):
model_path = self._resolve_path(thing, 'models/ldm/stable-diffusion-v1') # _resolve_path does a download if needed
elif Path(thing).is_file() and thing.endswith(('.ckpt','.safetensors')):
print(f' | {thing} appears to be a checkpoint file on disk')
model_path = self._resolve_path(thing, 'models/ldm/stable-diffusion-v1')
if Path(thing).stem in ['model','diffusion_pytorch_model']:
print(f' | {Path(thing).name} appears to be part of a diffusers model. Skipping import')
return
else:
print(f' | {thing} appears to be a checkpoint file on disk')
model_path = self._resolve_path(thing, 'models/ldm/stable-diffusion-v1')
elif Path(thing).is_dir() and Path(thing, 'model_index.json').exists():
print(f' | {thing} appears to be a diffusers file on disk')
@@ -792,11 +800,16 @@ class ModelManager(object):
)
elif Path(thing).is_dir():
print(f'>> {thing} appears to be a directory. Will scan for models to import')
for m in list(Path(thing).rglob('*.ckpt')) + list(Path(thing).rglob('*.safetensors')):
if model_name := self.heuristic_import(str(m), convert, commit_to_conf=commit_to_conf):
print(f' >> {model_name} successfully imported')
return model_name
if (Path(thing) / 'model_index.json').exists():
print(f'>> {thing} appears to be a diffusers model.')
model_name = self.import_diffuser_model(thing, commit_to_conf=commit_to_conf)
else:
print(f'>> {thing} appears to be a directory. Will scan for models to import')
for m in list(Path(thing).rglob('*.ckpt')) + list(Path(thing).rglob('*.safetensors')):
if model_name := self.heuristic_import(str(m), convert, commit_to_conf=commit_to_conf):
print(f' >> {model_name} successfully imported')
return model_name
elif re.match(r'^[\w.+-]+/[\w.+-]+$', thing):
print(f' | {thing} appears to be a HuggingFace diffusers repo_id')
@@ -831,9 +844,9 @@ class ModelManager(object):
model_config_file = Path(Globals.root,'configs/stable-diffusion/v2-inference-v.yaml')
convert = True
else:
print(f'** {thing} is a legacy checkpoint file of unknown format. Will treat as a regular v1.X model')
model_config_file = Path(Globals.root,'configs/stable-diffusion/v1-inference.yaml')
print(f'** {thing} is a legacy checkpoint file but not in a known Stable Diffusion model. Skipping import')
return
if convert:
diffuser_path = Path(Globals.root, 'models',Globals.converted_ckpts_dir, model_path.stem)
model_name = self.convert_and_import(
@@ -854,6 +867,8 @@ class ModelManager(object):
vae=str(Path(Globals.root,'models/ldm/stable-diffusion-v1/vae-ft-mse-840000-ema-pruned.ckpt')),
commit_to_conf=commit_to_conf,
)
if commit_to_conf:
self.commit(commit_to_conf)
return model_name
def convert_and_import(