fix several issues with Windows installs

1. resize the installer window to give more room for the configure and download forms
2. replace '\' with '/' in directory names so that users can drag and drop
   folders into the dialog boxes that accept directories
3. make the same change in the CLI for the !import_model and !convert_model commands
4. report errors more clearly when a model download fails due to network problems
5. put the launcher scripts into a loop so that the menu reappears after
   invokeai, the merge script, etc. exits; the user can quit with "Q"
   (the loop pattern is sketched below)
6. do not try to download an fp16 variant of sd-vae-ft-mse, since none exists
7. clean up status reporting when installing models
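As a hedged illustration of item 5: the actual launchers are .bat/.sh scripts not shown in this view, but the control flow they adopt is a plain menu loop. A minimal Python sketch of that pattern (the command names here are placeholders, not the scripts' real contents):

# Sketch of the looped-launcher pattern: after a chosen command exits,
# the menu is shown again until the user types "Q".
import subprocess

MENU = {
    "1": ["invokeai"],         # placeholder command names, for illustration only
    "2": ["invokeai-merge"],
}

while True:
    choice = input("[1] invokeai  [2] merge models  [Q] quit: ").strip().upper()
    if choice == "Q":
        break
    if choice in MENU:
        subprocess.run(MENU[choice])  # menu reappears once the command exits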
Lincoln Stein
2023-02-23 00:43:25 -05:00
parent 3083356cf0
commit 9b157b6532
10 changed files with 66 additions and 68 deletions

View File

@@ -496,6 +496,7 @@ def main_loop(gen, opt):
def do_command(command: str, gen, opt: Args, completer) -> tuple:
global infile
operation = "generate" # default operation, alternative is 'postprocess'
command = command.replace('\\','/') # windows
if command.startswith(
"!dream"
@@ -630,7 +631,6 @@ def import_model(model_path: str, gen, opt, completer, convert=False) -> str:
(3) a huggingface repository id; or (4) a local directory containing a
diffusers model.
"""
model_path = model_path.replace("\\", "/") # windows
default_name = Path(model_path).stem
model_name = None
model_desc = None

View File

@@ -1 +1 @@
__version__='2.3.1+rc1'
__version__='2.3.1+rc2'

View File

@@ -66,7 +66,7 @@ SD_Configs = Path(global_config_dir()) / "stable-diffusion"
Datasets = OmegaConf.load(Dataset_path)
# minimum size for the UI
MIN_COLS = 120
MIN_COLS = 135
MIN_LINES = 45
INIT_FILE_PREAMBLE = """# InvokeAI initialization file
@@ -165,7 +165,6 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
print(f"Installing {label} model file {model_url}...", end="", file=sys.stderr)
if not os.path.exists(model_dest):
os.makedirs(os.path.dirname(model_dest), exist_ok=True)
print("", file=sys.stderr)
request.urlretrieve(
model_url, model_dest, ProgressBar(os.path.basename(model_dest))
)
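Item 4 of the commit message (clearer reporting of network errors) is not visible in the hunks shown here. As a hedged sketch only, a wrapper around request.urlretrieve that distinguishes server failures from connection failures could look like this (the helper name is hypothetical):

from urllib import request
from urllib.error import HTTPError, URLError
import sys

def download_with_error_reporting(model_url: str, model_dest: str):
    # hypothetical helper: report HTTP errors and network errors separately;
    # HTTPError must be caught before URLError, since it is a subclass
    try:
        request.urlretrieve(model_url, model_dest)
    except HTTPError as e:
        print(f"Download of {model_url} failed: server returned {e.code} ({e.reason})", file=sys.stderr)
    except URLError as e:
        print(f"Download of {model_url} failed: network problem ({e.reason})", file=sys.stderr)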
@@ -183,26 +182,22 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
def download_bert():
print(
"Installing bert tokenizer...",
end="",
file=sys.stderr,
file=sys.stderr
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
from transformers import BertTokenizerFast
download_from_hf(BertTokenizerFast, "bert-base-uncased")
print("...success", file=sys.stderr)
# ---------------------------------------------
def download_clip():
print("Installing CLIP model...", file=sys.stderr)
version = "openai/clip-vit-large-patch14"
print("Tokenizer...", file=sys.stderr, end="")
print("Tokenizer...", file=sys.stderr)
download_from_hf(CLIPTokenizer, version)
print("Text model...", file=sys.stderr, end="")
print("Text model...", file=sys.stderr)
download_from_hf(CLIPTextModel, version)
print("...success", file=sys.stderr)
# ---------------------------------------------
@@ -255,7 +250,7 @@ def download_codeformer():
# ---------------------------------------------
def download_clipseg():
print("Installing clipseg model for text-based masking...", end="", file=sys.stderr)
print("Installing clipseg model for text-based masking...", file=sys.stderr)
CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
try:
download_from_hf(AutoProcessor, CLIPSEG_MODEL)
@@ -263,7 +258,6 @@ def download_clipseg():
except Exception:
print("Error installing clipseg model:")
print(traceback.format_exc())
print("...success", file=sys.stderr)
# -------------------------------------
@@ -279,15 +273,14 @@ def download_safety_checker():
print(traceback.format_exc())
return
safety_model_id = "CompVis/stable-diffusion-safety-checker"
print("AutoFeatureExtractor...", end="", file=sys.stderr)
print("AutoFeatureExtractor...", file=sys.stderr)
download_from_hf(AutoFeatureExtractor, safety_model_id)
print("StableDiffusionSafetyChecker...", end="", file=sys.stderr)
print("StableDiffusionSafetyChecker...", file=sys.stderr)
download_from_hf(StableDiffusionSafetyChecker, safety_model_id)
print("...success", file=sys.stderr)
# -------------------------------------
def download_vaes(precision: str):
def download_vaes():
print("Installing stabilityai VAE...", file=sys.stderr)
try:
# first the diffusers version
@@ -295,8 +288,6 @@ def download_vaes(precision: str):
args = dict(
cache_dir=global_cache_dir("diffusers"),
)
if precision == "float16":
args.update(torch_dtype=torch.float16, revision="fp16")
if not AutoencoderKL.from_pretrained(repo_id, **args):
raise Exception(f"download of {repo_id} failed")
@@ -309,7 +300,6 @@ def download_vaes(precision: str):
model_dir=str(Globals.root / Model_dir / Weights_dir),
):
raise Exception(f"download of {model_name} failed")
print("...downloaded successfully", file=sys.stderr)
except Exception as e:
print(f"Error downloading StabilityAI standard VAE: {str(e)}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
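Item 6 drops the revision="fp16" request because stabilityai/sd-vae-ft-mse publishes no fp16 branch, so asking for it always failed. If one wanted to probe for a revision rather than hard-code its absence, huggingface_hub can check before downloading; a sketch, assuming huggingface_hub's model_info and RevisionNotFoundError:

from huggingface_hub import HfApi
from huggingface_hub.utils import RevisionNotFoundError

def revision_exists(repo_id: str, revision: str) -> bool:
    # True if the repo has the named revision (e.g. an "fp16" branch)
    try:
        HfApi().model_info(repo_id, revision=revision)
        return True
    except RevisionNotFoundError:
        return False

# e.g. revision_exists("stabilityai/sd-vae-ft-mse", "fp16") -> False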
@@ -706,6 +696,9 @@ def write_opts(opts: Namespace, init_file: Path):
args_to_skip = re.compile(
"^--?(o|out|no-xformer|xformer|no-ckpt|ckpt|free|no-nsfw|nsfw|prec|max_load|embed|always|ckpt|free_gpu)"
)
# fix windows paths
opts.outdir = opts.outdir.replace('\\','/')
opts.embedding_path = opts.embedding_path.replace('\\','/')
new_file = f"{init_file}.new"
try:
lines = [x.strip() for x in open(init_file, "r").readlines()]
@@ -845,7 +838,7 @@ def main():
download_codeformer()
download_clipseg()
download_safety_checker()
download_vaes(init_options.precision)
download_vaes()
if opt.skip_sd_weights:
print("\n** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **")
@@ -856,10 +849,6 @@ def main():
postscript(errors=errors)
except KeyboardInterrupt:
print("\nGoodbye! Come back soon.")
except Exception as e:
print(f'\nA problem occurred during initialization.\nThe error was: "{str(e)}"')
print(traceback.format_exc())
# -------------------------------------
if __name__ == "__main__":

View File

@@ -12,7 +12,6 @@ The work is actually done in backend code in model_install_backend.py.
import argparse
import os
import sys
import traceback
from argparse import Namespace
from pathlib import Path
from typing import List
@@ -482,7 +481,7 @@ def main():
sys.exit(-1)
except KeyboardInterrupt:
print("\nGoodbye! Come back soon.")
except (widget.NotEnoughSpaceForWidget, Exception) as e:
except widget.NotEnoughSpaceForWidget as e:
if str(e).startswith("Height of 1 allocated"):
print(
"** Insufficient vertical space for the interface. Please make your window taller and try again"
@@ -491,11 +490,6 @@ def main():
print(
"** Insufficient horizontal space for the interface. Please make your window wider and try again."
)
else:
print(f"** An error has occurred: {str(e)}")
traceback.print_exc()
sys.exit(-1)
# -------------------------------------
if __name__ == "__main__":

View File

@@ -120,12 +120,13 @@ def install_requested_models(
argument = '--autoconvert' if convert_to_diffusers else '--autoimport'
initfile = Path(Globals.root, Globals.initfile)
replacement = Path(Globals.root, f'{Globals.initfile}.new')
directory = str(scan_directory).replace('\\','/')
with open(initfile,'r') as input:
with open(replacement,'w') as output:
while line := input.readline():
if not line.startswith(argument):
output.writelines([line])
output.writelines([f'{argument} {str(scan_directory)}'])
output.writelines([f'{argument} {directory}'])
os.replace(replacement,initfile)
# -------------------------------------
@@ -234,7 +235,6 @@ def _download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path:
def download_from_hf(
model_class: object, model_name: str, cache_subdir: Path = Path("hub"), **kwargs
):
print("", file=sys.stderr) # to prevent tqdm from overwriting
path = global_cache_dir(cache_subdir)
model = model_class.from_pretrained(
model_name,

View File

@@ -4,25 +4,25 @@ Widget class definitions used by model_select.py, merge_diffusers.py and textual
import math
import platform
import npyscreen
import os
import sys
import curses
import termios
import struct
import fcntl
from shutil import get_terminal_size
# -------------------------------------
def set_terminal_size(columns: int, lines: int):
os = platform.uname().system
if os=="Windows":
OS = platform.uname().system
if OS=="Windows":
os.system(f'mode con: cols={columns} lines={lines}')
elif os in ['Darwin', 'Linux']:
elif OS in ['Darwin', 'Linux']:
import termios
import fcntl
winsize = struct.pack("HHHH", lines, columns, 0, 0)
fcntl.ioctl(0, termios.TIOCSWINSZ, winsize)
fcntl.ioctl(sys.stdout.fileno(), termios.TIOCSWINSZ, winsize)
sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=lines, cols=columns))
sys.stdout.flush()
def set_min_terminal_size(min_cols: int, min_lines: int):
# make sure there's enough room for the ui
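The body of set_min_terminal_size is elided by the diff view. A plausible sketch, assuming it simply clamps the current size against the minimums using the helpers above (an assumption for illustration, not the file's actual code):

def set_min_terminal_size(min_cols: int, min_lines: int):
    # enlarge the window only if it is currently below the minimum size
    cols, lines = get_terminal_size()
    if cols < min_cols or lines < min_lines:
        set_terminal_size(max(cols, min_cols), max(lines, min_lines))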

View File

@@ -428,8 +428,6 @@ class ModelManager(object):
torch.cuda.reset_peak_memory_stats()
torch.cuda.empty_cache()
tic = time.time()
# this does the work
if not os.path.isabs(config):
config = os.path.join(Globals.root, config)
@@ -642,21 +640,18 @@ class ModelManager(object):
models.yaml file.
"""
model_name = model_name or Path(repo_or_path).stem
description = model_description or f"imported diffusers model {model_name}"
model_description = model_description or f"imported diffusers model {model_name}"
new_config = dict(
description=model_description,
vae=vae,
format="diffusers",
)
print(f"DEBUG: here i am 1")
if isinstance(repo_or_path, Path) and repo_or_path.exists():
new_config.update(path=str(repo_or_path))
else:
new_config.update(repo_id=repo_or_path)
print(f"DEBUG: here i am 2")
self.add_model(model_name, new_config, True)
print(f"DEBUG: config = {self.config}")
if commit_to_conf:
self.commit(commit_to_conf)
return model_name