fix several issues with Windows installs

1. resize installer window to give more room for configure and download forms
2. replace '\' with '/' in directory names so that users can drag-and-drop
   folders into the dialog boxes that accept directories.
3. similar change in CLI for the !import_model and !convert_model commands
4. better error reporting when a model download fails due to network errors
5. put the launcher scripts into a loop so that the menu reappears after
   invokeai, the merge script, etc. exit. User can quit with "Q".
6. do not try to download fp16 of sd-ft-mse-vae, since it doesn't exist.
7. cleaned up status reporting when installing models
This commit is contained in:
Lincoln Stein 2023-02-23 00:43:25 -05:00
parent 3083356cf0
commit 9b157b6532
10 changed files with 66 additions and 68 deletions

View File

@ -337,6 +337,7 @@ class InvokeAiInstance:
new_argv.append(el) new_argv.append(el)
sys.argv = new_argv sys.argv = new_argv
import requests # to catch download exceptions
from messages import introduction from messages import introduction
introduction() introduction()
@ -350,16 +351,16 @@ class InvokeAiInstance:
try: try:
invokeai_configure.main() invokeai_configure.main()
succeeded = True succeeded = True
except ConnectionError as e: except requests.exceptions.ConnectionError as e:
print(f'A network error was encountered during configuration and download: {str(e)}') print(f'\nA network error was encountered during configuration and download: {str(e)}')
except OSError as e: except OSError as e:
print(f'An OS error was encountered during configuration and download: {str(e)}') print(f'\nAn OS error was encountered during configuration and download: {str(e)}')
except Exception as e: except Exception as e:
print(f'A problem was encountered during the configuration and download steps: {str(e)}') print(f'\nA problem was encountered during the configuration and download steps: {str(e)}')
finally: finally:
if not succeeded: if not succeeded:
print('You may be able to finish the process by launching "invoke.sh" or "invoke.bat"') print('To try again, find the "invokeai" directory, run the script "invoke.sh" or "invoke.bat"')
print('from within the "invokeai" directory, and choosing options 5 and/or 6.') print('and choose option 7 to fix a broken install, optionally followed by option 5 to install models.')
print('Alternatively you can relaunch the installer.') print('Alternatively you can relaunch the installer.')
def install_user_scripts(self): def install_user_scripts(self):

View File

@ -6,8 +6,9 @@ setlocal
call .venv\Scripts\activate.bat call .venv\Scripts\activate.bat
set INVOKEAI_ROOT=. set INVOKEAI_ROOT=.
:start
echo Do you want to generate images using the echo Do you want to generate images using the
echo 1. command-line echo 1. command-line interface
echo 2. browser-based UI echo 2. browser-based UI
echo 3. run textual inversion training echo 3. run textual inversion training
echo 4. merge models (diffusers type only) echo 4. merge models (diffusers type only)
@ -17,7 +18,8 @@ echo 7. re-run the configure script to fix a broken install
echo 8. open the developer console echo 8. open the developer console
echo 9. update InvokeAI echo 9. update InvokeAI
echo 10. command-line help echo 10. command-line help
set /P restore="Please enter 1-10: [2] " echo Q - quit
set /P restore="Please enter 1-10, Q: [2] "
if not defined restore set restore=2 if not defined restore set restore=2
IF /I "%restore%" == "1" ( IF /I "%restore%" == "1" (
echo Starting the InvokeAI command-line.. echo Starting the InvokeAI command-line..
@ -60,9 +62,19 @@ IF /I "%restore%" == "1" (
python .venv\Scripts\invokeai.exe --help %* python .venv\Scripts\invokeai.exe --help %*
pause pause
exit /b exit /b
) ELSE IF /I "%restore%" == "q" (
echo Goodbye!
goto ending
) ELSE ( ) ELSE (
echo Invalid selection echo Invalid selection
pause pause
exit /b exit /b
) )
goto start
endlocal endlocal
pause
:ending
exit /b

View File

@ -24,9 +24,11 @@ if [ "$(uname -s)" == "Darwin" ]; then
export PYTORCH_ENABLE_MPS_FALLBACK=1 export PYTORCH_ENABLE_MPS_FALLBACK=1
fi fi
while true
do
if [ "$0" != "bash" ]; then if [ "$0" != "bash" ]; then
echo "Do you want to generate images using the" echo "Do you want to generate images using the"
echo "1. command-line" echo "1. command-line interface"
echo "2. browser-based UI" echo "2. browser-based UI"
echo "3. run textual inversion training" echo "3. run textual inversion training"
echo "4. merge models (diffusers type only)" echo "4. merge models (diffusers type only)"
@ -35,35 +37,36 @@ if [ "$0" != "bash" ]; then
echo "7. re-run the configure script to fix a broken install" echo "7. re-run the configure script to fix a broken install"
echo "8. open the developer console" echo "8. open the developer console"
echo "9. update InvokeAI" echo "9. update InvokeAI"
echo "10. command-line help " echo "10. command-line help"
echo "Q - Quit"
echo "" echo ""
read -p "Please enter 1-10: [2] " yn read -p "Please enter 1-10, Q: [2] " yn
choice=${yn:='2'} choice=${yn:='2'}
case $choice in case $choice in
1) 1)
echo "Starting the InvokeAI command-line..." echo "Starting the InvokeAI command-line..."
exec invokeai $@ invokeai $@
;; ;;
2) 2)
echo "Starting the InvokeAI browser-based UI..." echo "Starting the InvokeAI browser-based UI..."
exec invokeai --web $@ invokeai --web $@
;; ;;
3) 3)
echo "Starting Textual Inversion:" echo "Starting Textual Inversion:"
exec invokeai-ti --gui $@ invokeai-ti --gui $@
;; ;;
4) 4)
echo "Merging Models:" echo "Merging Models:"
exec invokeai-merge --gui $@ invokeai-merge --gui $@
;; ;;
5) 5)
exec invokeai-model-install --root ${INVOKEAI_ROOT} invokeai-model-install --root ${INVOKEAI_ROOT}
;; ;;
6) 6)
exec invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
;; ;;
7) 7)
exec invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
;; ;;
8) 8)
echo "Developer Console:" echo "Developer Console:"
@ -72,10 +75,13 @@ if [ "$0" != "bash" ]; then
;; ;;
9) 9)
echo "Update:" echo "Update:"
exec invokeai-update invokeai-update
;; ;;
10) 10)
exec invokeai --help invokeai --help
;;
[qQ])
exit 0
;; ;;
*) *)
echo "Invalid selection" echo "Invalid selection"
@ -86,3 +92,4 @@ else # in developer console
echo "Press ^D to exit" echo "Press ^D to exit"
export PS1="(InvokeAI) \u@\h \w> " export PS1="(InvokeAI) \u@\h \w> "
fi fi
done

View File

@ -496,6 +496,7 @@ def main_loop(gen, opt):
def do_command(command: str, gen, opt: Args, completer) -> tuple: def do_command(command: str, gen, opt: Args, completer) -> tuple:
global infile global infile
operation = "generate" # default operation, alternative is 'postprocess' operation = "generate" # default operation, alternative is 'postprocess'
command = command.replace('\\','/') # windows
if command.startswith( if command.startswith(
"!dream" "!dream"
@ -630,7 +631,6 @@ def import_model(model_path: str, gen, opt, completer, convert=False) -> str:
(3) a huggingface repository id; or (4) a local directory containing a (3) a huggingface repository id; or (4) a local directory containing a
diffusers model. diffusers model.
""" """
model_path = model_path.replace("\\", "/") # windows
default_name = Path(model_path).stem default_name = Path(model_path).stem
model_name = None model_name = None
model_desc = None model_desc = None

View File

@ -1 +1 @@
__version__='2.3.1+rc1' __version__='2.3.1+rc2'

View File

@ -66,7 +66,7 @@ SD_Configs = Path(global_config_dir()) / "stable-diffusion"
Datasets = OmegaConf.load(Dataset_path) Datasets = OmegaConf.load(Dataset_path)
# minimum size for the UI # minimum size for the UI
MIN_COLS = 120 MIN_COLS = 135
MIN_LINES = 45 MIN_LINES = 45
INIT_FILE_PREAMBLE = """# InvokeAI initialization file INIT_FILE_PREAMBLE = """# InvokeAI initialization file
@ -165,7 +165,6 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
print(f"Installing {label} model file {model_url}...", end="", file=sys.stderr) print(f"Installing {label} model file {model_url}...", end="", file=sys.stderr)
if not os.path.exists(model_dest): if not os.path.exists(model_dest):
os.makedirs(os.path.dirname(model_dest), exist_ok=True) os.makedirs(os.path.dirname(model_dest), exist_ok=True)
print("", file=sys.stderr)
request.urlretrieve( request.urlretrieve(
model_url, model_dest, ProgressBar(os.path.basename(model_dest)) model_url, model_dest, ProgressBar(os.path.basename(model_dest))
) )
@ -183,26 +182,22 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
def download_bert(): def download_bert():
print( print(
"Installing bert tokenizer...", "Installing bert tokenizer...",
end="", file=sys.stderr
file=sys.stderr,
) )
with warnings.catch_warnings(): with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning) warnings.filterwarnings("ignore", category=DeprecationWarning)
from transformers import BertTokenizerFast from transformers import BertTokenizerFast
download_from_hf(BertTokenizerFast, "bert-base-uncased") download_from_hf(BertTokenizerFast, "bert-base-uncased")
print("...success", file=sys.stderr)
# --------------------------------------------- # ---------------------------------------------
def download_clip(): def download_clip():
print("Installing CLIP model...", file=sys.stderr) print("Installing CLIP model...", file=sys.stderr)
version = "openai/clip-vit-large-patch14" version = "openai/clip-vit-large-patch14"
print("Tokenizer...", file=sys.stderr, end="") print("Tokenizer...", file=sys.stderr)
download_from_hf(CLIPTokenizer, version) download_from_hf(CLIPTokenizer, version)
print("Text model...", file=sys.stderr, end="") print("Text model...", file=sys.stderr)
download_from_hf(CLIPTextModel, version) download_from_hf(CLIPTextModel, version)
print("...success", file=sys.stderr)
# --------------------------------------------- # ---------------------------------------------
@ -255,7 +250,7 @@ def download_codeformer():
# --------------------------------------------- # ---------------------------------------------
def download_clipseg(): def download_clipseg():
print("Installing clipseg model for text-based masking...", end="", file=sys.stderr) print("Installing clipseg model for text-based masking...", file=sys.stderr)
CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined" CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
try: try:
download_from_hf(AutoProcessor, CLIPSEG_MODEL) download_from_hf(AutoProcessor, CLIPSEG_MODEL)
@ -263,7 +258,6 @@ def download_clipseg():
except Exception: except Exception:
print("Error installing clipseg model:") print("Error installing clipseg model:")
print(traceback.format_exc()) print(traceback.format_exc())
print("...success", file=sys.stderr)
# ------------------------------------- # -------------------------------------
@ -279,15 +273,14 @@ def download_safety_checker():
print(traceback.format_exc()) print(traceback.format_exc())
return return
safety_model_id = "CompVis/stable-diffusion-safety-checker" safety_model_id = "CompVis/stable-diffusion-safety-checker"
print("AutoFeatureExtractor...", end="", file=sys.stderr) print("AutoFeatureExtractor...", file=sys.stderr)
download_from_hf(AutoFeatureExtractor, safety_model_id) download_from_hf(AutoFeatureExtractor, safety_model_id)
print("StableDiffusionSafetyChecker...", end="", file=sys.stderr) print("StableDiffusionSafetyChecker...", file=sys.stderr)
download_from_hf(StableDiffusionSafetyChecker, safety_model_id) download_from_hf(StableDiffusionSafetyChecker, safety_model_id)
print("...success", file=sys.stderr)
# ------------------------------------- # -------------------------------------
def download_vaes(precision: str): def download_vaes():
print("Installing stabilityai VAE...", file=sys.stderr) print("Installing stabilityai VAE...", file=sys.stderr)
try: try:
# first the diffusers version # first the diffusers version
@ -295,8 +288,6 @@ def download_vaes(precision: str):
args = dict( args = dict(
cache_dir=global_cache_dir("diffusers"), cache_dir=global_cache_dir("diffusers"),
) )
if precision == "float16":
args.update(torch_dtype=torch.float16, revision="fp16")
if not AutoencoderKL.from_pretrained(repo_id, **args): if not AutoencoderKL.from_pretrained(repo_id, **args):
raise Exception(f"download of {repo_id} failed") raise Exception(f"download of {repo_id} failed")
@ -309,7 +300,6 @@ def download_vaes(precision: str):
model_dir=str(Globals.root / Model_dir / Weights_dir), model_dir=str(Globals.root / Model_dir / Weights_dir),
): ):
raise Exception(f"download of {model_name} failed") raise Exception(f"download of {model_name} failed")
print("...downloaded successfully", file=sys.stderr)
except Exception as e: except Exception as e:
print(f"Error downloading StabilityAI standard VAE: {str(e)}", file=sys.stderr) print(f"Error downloading StabilityAI standard VAE: {str(e)}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr) print(traceback.format_exc(), file=sys.stderr)
@ -706,6 +696,9 @@ def write_opts(opts: Namespace, init_file: Path):
args_to_skip = re.compile( args_to_skip = re.compile(
"^--?(o|out|no-xformer|xformer|no-ckpt|ckpt|free|no-nsfw|nsfw|prec|max_load|embed|always|ckpt|free_gpu)" "^--?(o|out|no-xformer|xformer|no-ckpt|ckpt|free|no-nsfw|nsfw|prec|max_load|embed|always|ckpt|free_gpu)"
) )
# fix windows paths
opts.outdir = opts.outdir.replace('\\','/')
opts.embedding_path = opts.embedding_path.replace('\\','/')
new_file = f"{init_file}.new" new_file = f"{init_file}.new"
try: try:
lines = [x.strip() for x in open(init_file, "r").readlines()] lines = [x.strip() for x in open(init_file, "r").readlines()]
@ -845,7 +838,7 @@ def main():
download_codeformer() download_codeformer()
download_clipseg() download_clipseg()
download_safety_checker() download_safety_checker()
download_vaes(init_options.precision) download_vaes()
if opt.skip_sd_weights: if opt.skip_sd_weights:
print("\n** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **") print("\n** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **")
@ -856,10 +849,6 @@ def main():
postscript(errors=errors) postscript(errors=errors)
except KeyboardInterrupt: except KeyboardInterrupt:
print("\nGoodbye! Come back soon.") print("\nGoodbye! Come back soon.")
except Exception as e:
print(f'\nA problem occurred during initialization.\nThe error was: "{str(e)}"')
print(traceback.format_exc())
# ------------------------------------- # -------------------------------------
if __name__ == "__main__": if __name__ == "__main__":

View File

@ -12,7 +12,6 @@ The work is actually done in backend code in model_install_backend.py.
import argparse import argparse
import os import os
import sys import sys
import traceback
from argparse import Namespace from argparse import Namespace
from pathlib import Path from pathlib import Path
from typing import List from typing import List
@ -482,7 +481,7 @@ def main():
sys.exit(-1) sys.exit(-1)
except KeyboardInterrupt: except KeyboardInterrupt:
print("\nGoodbye! Come back soon.") print("\nGoodbye! Come back soon.")
except (widget.NotEnoughSpaceForWidget, Exception) as e: except widget.NotEnoughSpaceForWidget as e:
if str(e).startswith("Height of 1 allocated"): if str(e).startswith("Height of 1 allocated"):
print( print(
"** Insufficient vertical space for the interface. Please make your window taller and try again" "** Insufficient vertical space for the interface. Please make your window taller and try again"
@ -491,11 +490,6 @@ def main():
print( print(
"** Insufficient horizontal space for the interface. Please make your window wider and try again." "** Insufficient horizontal space for the interface. Please make your window wider and try again."
) )
else:
print(f"** An error has occurred: {str(e)}")
traceback.print_exc()
sys.exit(-1)
# ------------------------------------- # -------------------------------------
if __name__ == "__main__": if __name__ == "__main__":

View File

@ -120,12 +120,13 @@ def install_requested_models(
argument = '--autoconvert' if convert_to_diffusers else '--autoimport' argument = '--autoconvert' if convert_to_diffusers else '--autoimport'
initfile = Path(Globals.root, Globals.initfile) initfile = Path(Globals.root, Globals.initfile)
replacement = Path(Globals.root, f'{Globals.initfile}.new') replacement = Path(Globals.root, f'{Globals.initfile}.new')
directory = str(scan_directory).replace('\\','/')
with open(initfile,'r') as input: with open(initfile,'r') as input:
with open(replacement,'w') as output: with open(replacement,'w') as output:
while line := input.readline(): while line := input.readline():
if not line.startswith(argument): if not line.startswith(argument):
output.writelines([line]) output.writelines([line])
output.writelines([f'{argument} {str(scan_directory)}']) output.writelines([f'{argument} {directory}'])
os.replace(replacement,initfile) os.replace(replacement,initfile)
# ------------------------------------- # -------------------------------------
@ -234,7 +235,6 @@ def _download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path:
def download_from_hf( def download_from_hf(
model_class: object, model_name: str, cache_subdir: Path = Path("hub"), **kwargs model_class: object, model_name: str, cache_subdir: Path = Path("hub"), **kwargs
): ):
print("", file=sys.stderr) # to prevent tqdm from overwriting
path = global_cache_dir(cache_subdir) path = global_cache_dir(cache_subdir)
model = model_class.from_pretrained( model = model_class.from_pretrained(
model_name, model_name,

View File

@ -4,26 +4,26 @@ Widget class definitions used by model_select.py, merge_diffusers.py and textual
import math import math
import platform import platform
import npyscreen import npyscreen
import os
import sys import sys
import curses import curses
import termios
import struct import struct
import fcntl
from shutil import get_terminal_size from shutil import get_terminal_size
# ------------------------------------- # -------------------------------------
def set_terminal_size(columns: int, lines: int): def set_terminal_size(columns: int, lines: int):
os = platform.uname().system OS = platform.uname().system
if os=="Windows": if OS=="Windows":
os.system(f'mode con: cols={columns} lines={lines}') os.system(f'mode con: cols={columns} lines={lines}')
elif os in ['Darwin', 'Linux']: elif OS in ['Darwin', 'Linux']:
import termios
import fcntl
winsize = struct.pack("HHHH", lines, columns, 0, 0) winsize = struct.pack("HHHH", lines, columns, 0, 0)
fcntl.ioctl(0, termios.TIOCSWINSZ, winsize) fcntl.ioctl(sys.stdout.fileno(), termios.TIOCSWINSZ, winsize)
sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=lines, cols=columns)) sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=lines, cols=columns))
sys.stdout.flush() sys.stdout.flush()
def set_min_terminal_size(min_cols: int, min_lines: int): def set_min_terminal_size(min_cols: int, min_lines: int):
# make sure there's enough room for the ui # make sure there's enough room for the ui
term_cols, term_lines = get_terminal_size() term_cols, term_lines = get_terminal_size()

View File

@ -428,8 +428,6 @@ class ModelManager(object):
torch.cuda.reset_peak_memory_stats() torch.cuda.reset_peak_memory_stats()
torch.cuda.empty_cache() torch.cuda.empty_cache()
tic = time.time()
# this does the work # this does the work
if not os.path.isabs(config): if not os.path.isabs(config):
config = os.path.join(Globals.root, config) config = os.path.join(Globals.root, config)
@ -642,21 +640,18 @@ class ModelManager(object):
models.yaml file. models.yaml file.
""" """
model_name = model_name or Path(repo_or_path).stem model_name = model_name or Path(repo_or_path).stem
description = model_description or f"imported diffusers model {model_name}" model_description = model_description or f"imported diffusers model {model_name}"
new_config = dict( new_config = dict(
description=model_description, description=model_description,
vae=vae, vae=vae,
format="diffusers", format="diffusers",
) )
print(f"DEBUG: here i am 1")
if isinstance(repo_or_path, Path) and repo_or_path.exists(): if isinstance(repo_or_path, Path) and repo_or_path.exists():
new_config.update(path=str(repo_or_path)) new_config.update(path=str(repo_or_path))
else: else:
new_config.update(repo_id=repo_or_path) new_config.update(repo_id=repo_or_path)
print(f"DEBUG: here i am 2")
self.add_model(model_name, new_config, True) self.add_model(model_name, new_config, True)
print(f"DEBUG: config = {self.config}")
if commit_to_conf: if commit_to_conf:
self.commit(commit_to_conf) self.commit(commit_to_conf)
return model_name return model_name