Merge branch 'v2.3' into bugfix/sanity-check-rootdir

Lincoln Stein 2023-02-23 11:14:52 -05:00 committed by GitHub
commit a485515bc6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 128 additions and 77 deletions

View File

@@ -20,10 +20,9 @@ echo Building installer for version $VERSION
echo "Be certain that you're in the 'installer' directory before continuing."
read -p "Press any key to continue, or CTRL-C to exit..."
read -e -p "Commit and tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
read -e -p "Tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
RESPONSE=${input:='n'}
if [ "$RESPONSE" == 'y' ]; then
-git commit -a
if ! git tag $VERSION ; then
echo "Existing/invalid tag"
@@ -32,6 +31,8 @@ if [ "$RESPONSE" == 'y' ]; then
git push origin :refs/tags/$LATEST_TAG
git tag -fa $LATEST_TAG
echo "remember to push --tags!"
fi
+# ----------------------

View File

@@ -336,7 +336,8 @@ class InvokeAiInstance:
elif el in ['-y','--yes','--yes-to-all']:
new_argv.append(el)
sys.argv = new_argv
+import requests # to catch download exceptions
from messages import introduction
introduction()
@@ -346,7 +347,21 @@ class InvokeAiInstance:
# NOTE: currently the config script does its own arg parsing! this means the command-line switches
# from the installer will also automatically propagate down to the config script.
# this may change in the future with config refactoring!
-invokeai_configure.main()
+succeeded = False
+try:
+    invokeai_configure.main()
+    succeeded = True
+except requests.exceptions.ConnectionError as e:
+    print(f'\nA network error was encountered during configuration and download: {str(e)}')
+except OSError as e:
+    print(f'\nAn OS error was encountered during configuration and download: {str(e)}')
+except Exception as e:
+    print(f'\nA problem was encountered during the configuration and download steps: {str(e)}')
+finally:
+    if not succeeded:
+        print('To try again, find the "invokeai" directory, run the script "invoke.sh" or "invoke.bat"')
+        print('and choose option 7 to fix a broken install, optionally followed by option 5 to install models.')
+        print('Alternatively you can relaunch the installer.')
def install_user_scripts(self):
"""

View File

@@ -6,8 +6,9 @@ setlocal
call .venv\Scripts\activate.bat
set INVOKEAI_ROOT=.
+:start
echo Do you want to generate images using the
-echo 1. command-line
+echo 1. command-line interface
echo 2. browser-based UI
echo 3. run textual inversion training
echo 4. merge models (diffusers type only)
@@ -17,7 +18,8 @@ echo 7. re-run the configure script to fix a broken install
echo 8. open the developer console
echo 9. update InvokeAI
echo 10. command-line help
-set /P restore="Please enter 1-10: [2] "
+echo Q - quit
+set /P restore="Please enter 1-10, Q: [2] "
if not defined restore set restore=2
IF /I "%restore%" == "1" (
echo Starting the InvokeAI command-line..
@@ -60,9 +62,19 @@ IF /I "%restore%" == "1" (
python .venv\Scripts\invokeai.exe --help %*
pause
exit /b
+) ELSE IF /I "%restore%" == "q" (
+echo Goodbye!
+goto ending
) ELSE (
echo Invalid selection
pause
exit /b
)
+goto start
endlocal
pause
+:ending
+exit /b

View File

@@ -24,9 +24,11 @@ if [ "$(uname -s)" == "Darwin" ]; then
export PYTORCH_ENABLE_MPS_FALLBACK=1
fi
+while true
+do
if [ "$0" != "bash" ]; then
echo "Do you want to generate images using the"
echo "1. command-line"
echo "1. command-line interface"
echo "2. browser-based UI"
echo "3. run textual inversion training"
echo "4. merge models (diffusers type only)"
@@ -35,35 +37,36 @@ if [ "$0" != "bash" ]; then
echo "7. re-run the configure script to fix a broken install"
echo "8. open the developer console"
echo "9. update InvokeAI"
echo "10. command-line help "
echo "10. command-line help"
echo "Q - Quit"
echo ""
read -p "Please enter 1-10: [2] " yn
read -p "Please enter 1-10, Q: [2] " yn
choice=${yn:='2'}
case $choice in
1)
echo "Starting the InvokeAI command-line..."
-exec invokeai $@
+invokeai $@
;;
2)
echo "Starting the InvokeAI browser-based UI..."
-exec invokeai --web $@
+invokeai --web $@
;;
3)
echo "Starting Textual Inversion:"
-exec invokeai-ti --gui $@
+invokeai-ti --gui $@
;;
4)
echo "Merging Models:"
-exec invokeai-merge --gui $@
+invokeai-merge --gui $@
;;
5)
-exec invokeai-model-install --root ${INVOKEAI_ROOT}
+invokeai-model-install --root ${INVOKEAI_ROOT}
;;
6)
-exec invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
+invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
;;
7)
-exec invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
+invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
;;
8)
echo "Developer Console:"
@@ -72,10 +75,13 @@ if [ "$0" != "bash" ]; then
;;
9)
echo "Update:"
-exec invokeai-update
+invokeai-update
;;
10)
-exec invokeai --help
+invokeai --help
;;
+[qQ])
+exit 0
+;;
*)
echo "Invalid selection"
@@ -86,3 +92,4 @@ else # in developer console
echo "Press ^D to exit"
export PS1="(InvokeAI) \u@\h \w> "
fi
+done
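
Note: dropping exec from each menu branch is what makes the new while-true loop work. exec replaces the launcher shell with the chosen program, so the menu could never reappear once that program exited; running the command as an ordinary child process returns control to the loop, which redraws the menu. A rough Python analogue of the new control flow (not part of the commit; assumes an invokeai executable on PATH):

    import subprocess

    while True:
        choice = input("Please enter 1-10, Q: [2] ") or "2"
        if choice.lower() == "q":
            break
        # A child process rather than a process replacement: when it
        # exits, control returns here and the menu is shown again.
        cmd = ["invokeai", "--web"] if choice == "2" else ["invokeai"]
        subprocess.run(cmd)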

View File

@@ -496,6 +496,7 @@ def main_loop(gen, opt):
def do_command(command: str, gen, opt: Args, completer) -> tuple:
global infile
operation = "generate" # default operation, alternative is 'postprocess'
+command = command.replace('\\','/') # windows
if command.startswith(
"!dream"
@@ -630,7 +631,6 @@ def import_model(model_path: str, gen, opt, completer, convert=False) -> str:
(3) a huggingface repository id; or (4) a local directory containing a
diffusers model.
"""
-model_path = model_path.replace("\\", "/") # windows
default_name = Path(model_path).stem
model_name = None
model_desc = None
@@ -672,7 +672,7 @@ def import_model(model_path: str, gen, opt, completer, convert=False) -> str:
gen.model_manager.commit(opt.conf)
completer.update_models(gen.model_manager.list_models())
print(f">> {model_name} successfully installed")
print(f">> {imported_name} successfully installed")
def _verify_load(model_name: str, gen) -> bool:
print(">> Verifying that new model loads...")

View File

@@ -1 +1 @@
-__version__='2.3.1+rc1'
+__version__='2.3.1+rc3'

View File

@@ -8,7 +8,6 @@
#
print("Loading Python libraries...\n")
import argparse
-import curses
import io
import os
import re
@@ -19,6 +18,7 @@ import warnings
from argparse import Namespace
from pathlib import Path
from urllib import request
+from shutil import get_terminal_size
import npyscreen
import torch
@@ -46,7 +46,8 @@ from .model_install_backend import (
recommended_datasets,
hf_download_with_resume,
)
-from .widgets import IntTitleSlider, CenteredButtonPress
+from .widgets import IntTitleSlider, CenteredButtonPress, set_min_terminal_size
warnings.filterwarnings("ignore")
@@ -64,6 +65,10 @@ SD_Configs = Path(global_config_dir()) / "stable-diffusion"
Datasets = OmegaConf.load(Dataset_path)
+# minimum size for the UI
+MIN_COLS = 135
+MIN_LINES = 45
INIT_FILE_PREAMBLE = """# InvokeAI initialization file
# This is the InvokeAI initialization file, which contains command-line default values.
# Feel free to edit. If anything goes wrong, you can re-initialize this file by deleting
@@ -109,8 +114,6 @@ Add the '--help' argument to see all of the command-line switches available for
# ---------------------------------------------
def yes_or_no(prompt: str, default_yes=True):
-completer.set_options(["yes", "no"])
-completer.complete_extensions(None) # turn off path-completion mode
default = "y" if default_yes else "n"
response = input(f"{prompt} [{default}] ") or default
if default_yes:
@@ -162,7 +165,6 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
print(f"Installing {label} model file {model_url}...", end="", file=sys.stderr)
if not os.path.exists(model_dest):
os.makedirs(os.path.dirname(model_dest), exist_ok=True)
print("", file=sys.stderr)
request.urlretrieve(
model_url, model_dest, ProgressBar(os.path.basename(model_dest))
)
@@ -180,26 +182,22 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
def download_bert():
print(
"Installing bert tokenizer...",
end="",
file=sys.stderr,
file=sys.stderr
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
from transformers import BertTokenizerFast
download_from_hf(BertTokenizerFast, "bert-base-uncased")
print("...success", file=sys.stderr)
# ---------------------------------------------
def download_clip():
print("Installing CLIP model...", file=sys.stderr)
version = "openai/clip-vit-large-patch14"
print("Tokenizer...", file=sys.stderr, end="")
print("Tokenizer...", file=sys.stderr)
download_from_hf(CLIPTokenizer, version)
print("Text model...", file=sys.stderr, end="")
print("Text model...", file=sys.stderr)
download_from_hf(CLIPTextModel, version)
print("...success", file=sys.stderr)
# ---------------------------------------------
@@ -252,7 +250,7 @@ def download_codeformer():
# ---------------------------------------------
def download_clipseg():
print("Installing clipseg model for text-based masking...", end="", file=sys.stderr)
print("Installing clipseg model for text-based masking...", file=sys.stderr)
CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
try:
download_from_hf(AutoProcessor, CLIPSEG_MODEL)
@@ -260,7 +258,6 @@ def download_clipseg():
except Exception:
print("Error installing clipseg model:")
print(traceback.format_exc())
print("...success", file=sys.stderr)
# -------------------------------------
@@ -276,15 +273,14 @@ def download_safety_checker():
print(traceback.format_exc())
return
safety_model_id = "CompVis/stable-diffusion-safety-checker"
print("AutoFeatureExtractor...", end="", file=sys.stderr)
print("AutoFeatureExtractor...", file=sys.stderr)
download_from_hf(AutoFeatureExtractor, safety_model_id)
print("StableDiffusionSafetyChecker...", end="", file=sys.stderr)
print("StableDiffusionSafetyChecker...", file=sys.stderr)
download_from_hf(StableDiffusionSafetyChecker, safety_model_id)
print("...success", file=sys.stderr)
# -------------------------------------
-def download_vaes(precision: str):
+def download_vaes():
print("Installing stabilityai VAE...", file=sys.stderr)
try:
# first the diffusers version
@@ -292,8 +288,6 @@ def download_vaes(precision: str):
args = dict(
cache_dir=global_cache_dir("diffusers"),
)
-if precision == "float16":
-    args.update(torch_dtype=torch.float16, revision="fp16")
if not AutoencoderKL.from_pretrained(repo_id, **args):
raise Exception(f"download of {repo_id} failed")
@@ -306,7 +300,6 @@
model_dir=str(Globals.root / Model_dir / Weights_dir),
):
raise Exception(f"download of {model_name} failed")
print("...downloaded successfully", file=sys.stderr)
except Exception as e:
print(f"Error downloading StabilityAI standard VAE: {str(e)}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
@@ -332,8 +325,7 @@ class editOptsForm(npyscreen.FormMultiPage):
old_opts = self.parentApp.invokeai_opts
first_time = not (Globals.root / Globals.initfile).exists()
access_token = HfFolder.get_token()
-window_height, window_width = curses.initscr().getmaxyx()
+window_width,window_height = get_terminal_size()
for i in [
"Configure startup settings. You can come back and change these later.",
"Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields.",
@@ -676,6 +668,8 @@
) -> (Namespace, Namespace):
# parse_args() will read from init file if present
invokeai_opts = default_startup_options(initfile)
+set_min_terminal_size(MIN_COLS, MIN_LINES)
editApp = EditOptApplication(program_opts, invokeai_opts)
editApp.run()
if editApp.user_cancelled:
@@ -683,7 +677,6 @@
else:
return (editApp.new_opts, editApp.user_selections)
# -------------------------------------
def write_opts(opts: Namespace, init_file: Path):
"""
@@ -703,6 +696,9 @@ def write_opts(opts: Namespace, init_file: Path):
args_to_skip = re.compile(
"^--?(o|out|no-xformer|xformer|no-ckpt|ckpt|free|no-nsfw|nsfw|prec|max_load|embed|always|ckpt|free_gpu)"
)
+# fix windows paths
+opts.outdir = opts.outdir.replace('\\','/')
+opts.embedding_path = opts.embedding_path.replace('\\','/')
new_file = f"{init_file}.new"
try:
lines = [x.strip() for x in open(init_file, "r").readlines()]
@@ -842,7 +838,7 @@ def main():
download_codeformer()
download_clipseg()
download_safety_checker()
-download_vaes(init_options.precision)
+download_vaes()
if opt.skip_sd_weights:
print("\n** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **")
@@ -853,10 +849,6 @@ def main():
postscript(errors=errors)
except KeyboardInterrupt:
print("\nGoodbye! Come back soon.")
-except Exception as e:
-    print(f'\nA problem occurred during initialization.\nThe error was: "{str(e)}"')
-    print(traceback.format_exc())
# -------------------------------------
if __name__ == "__main__":
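
Note: throughout this commit, curses.initscr().getmaxyx() is replaced by shutil.get_terminal_size(). The old call initialized curses, with terminal-mode side effects, just to measure the screen, and the two APIs disagree on ordering: getmaxyx() returns (rows, cols) while get_terminal_size() returns (columns, lines), hence the swapped unpacking in the new code. A quick sketch:

    from shutil import get_terminal_size

    # Returns an os.terminal_size named tuple of (columns, lines), the
    # reverse of curses' (rows, cols), with no curses side effects.
    window_width, window_height = get_terminal_size()
    print(f"{window_width} cols x {window_height} lines")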

View File

@@ -10,10 +10,8 @@ The work is actually done in backend code in model_install_backend.py.
"""
import argparse
-import curses
import os
import sys
-import traceback
from argparse import Namespace
from pathlib import Path
from typing import List
@@ -22,15 +20,23 @@ import npyscreen
import torch
from npyscreen import widget
from omegaconf import OmegaConf
+from shutil import get_terminal_size
from ..devices import choose_precision, choose_torch_device
from ..globals import Globals, global_config_dir
from .model_install_backend import (Dataset_path, default_config_file,
default_dataset, get_root,
install_requested_models,
-recommended_datasets)
+recommended_datasets,
+)
from .widgets import (MultiSelectColumns, TextBox,
-OffsetButtonPress, CenteredTitleText)
+OffsetButtonPress, CenteredTitleText,
+set_min_terminal_size,
+)
+# minimum size for the UI
+MIN_COLS = 120
+MIN_LINES = 45
class addModelsForm(npyscreen.FormMultiPage):
# for responsive resizing - disabled
@@ -50,7 +56,7 @@ class addModelsForm(npyscreen.FormMultiPage):
super().__init__(parentApp=parentApp, name=name, *args, **keywords)
def create(self):
-window_height, window_width = curses.initscr().getmaxyx()
+window_width, window_height = get_terminal_size()
starter_model_labels = self._get_starter_model_labels()
recommended_models = [
x
@@ -249,7 +255,7 @@ class addModelsForm(npyscreen.FormMultiPage):
)
def _get_starter_model_labels(self) -> List[str]:
-window_height, window_width = curses.initscr().getmaxyx()
+window_width, window_height = get_terminal_size()
label_width = 25
checkbox_width = 4
spacing_width = 2
@@ -268,7 +274,7 @@ class addModelsForm(npyscreen.FormMultiPage):
]
def _get_columns(self) -> int:
-window_height, window_width = curses.initscr().getmaxyx()
+window_width, window_height = get_terminal_size()
cols = (
4
if window_width > 240
@@ -362,7 +368,6 @@ class AddModelApplication(npyscreen.NPSAppManaged):
"MAIN", addModelsForm, name="Install Stable Diffusion Models"
)
# --------------------------------------------------------
def process_and_execute(opt: Namespace, selections: Namespace):
models_to_remove = [
@@ -409,6 +414,7 @@ def select_and_download_models(opt: Namespace):
precision=precision,
)
else:
+set_min_terminal_size(MIN_COLS, MIN_LINES)
installApp = AddModelApplication()
installApp.run()
@@ -475,7 +481,7 @@ def main():
sys.exit(-1)
except KeyboardInterrupt:
print("\nGoodbye! Come back soon.")
-except (widget.NotEnoughSpaceForWidget, Exception) as e:
+except widget.NotEnoughSpaceForWidget as e:
if str(e).startswith("Height of 1 allocated"):
print(
"** Insufficient vertical space for the interface. Please make your window taller and try again"
@@ -484,11 +490,6 @@ def main():
print(
"** Insufficient horizontal space for the interface. Please make your window wider and try again."
)
-else:
-    print(f"** An error has occurred: {str(e)}")
-    traceback.print_exc()
-sys.exit(-1)
# -------------------------------------
if __name__ == "__main__":

View File

@@ -67,6 +67,9 @@ def install_requested_models(
purge_deleted: bool = False,
config_file_path: Path = None,
):
+'''
+Entry point for installing/deleting starter models, or installing external models.
+'''
config_file_path=config_file_path or default_config_file()
if not config_file_path.exists():
open(config_file_path,'w')
@@ -117,14 +120,15 @@ def install_requested_models(
argument = '--autoconvert' if convert_to_diffusers else '--autoimport'
initfile = Path(Globals.root, Globals.initfile)
replacement = Path(Globals.root, f'{Globals.initfile}.new')
+directory = str(scan_directory).replace('\\','/')
with open(initfile,'r') as input:
with open(replacement,'w') as output:
while line := input.readline():
if not line.startswith(argument):
output.writelines([line])
-output.writelines([f'{argument} {str(scan_directory)}'])
+output.writelines([f'{argument} {directory}'])
os.replace(replacement,initfile)
# -------------------------------------
def yes_or_no(prompt: str, default_yes=True):
default = "y" if default_yes else "n"
@@ -231,7 +235,6 @@ def _download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path:
def download_from_hf(
model_class: object, model_name: str, cache_subdir: Path = Path("hub"), **kwargs
):
print("", file=sys.stderr) # to prevent tqdm from overwriting
path = global_cache_dir(cache_subdir)
model = model_class.from_pretrained(
model_name,
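
Note: the --autoimport/--autoconvert hunk above uses a write-then-swap pattern: the init file is copied line by line into a .new sibling with the stale directive filtered out, the fresh directive is appended, and os.replace() swaps the new file into place (atomic when both paths are on the same filesystem), so a crash mid-write cannot truncate the real file. A condensed sketch of the pattern with illustrative names:

    import os

    def replace_directive(initfile: str, argument: str, value: str):
        # Drop any existing `argument` line, append a fresh one, then
        # swap the rewritten file into place atomically.
        replacement = f"{initfile}.new"
        with open(initfile) as src, open(replacement, "w") as dst:
            for line in src:
                if not line.startswith(argument):
                    dst.write(line)
            dst.write(f"{argument} {value}\n")
        os.replace(replacement, initfile)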

View File

@@ -2,8 +2,34 @@
Widget class definitions used by model_select.py, merge_diffusers.py and textual_inversion.py
'''
import math
+import platform
import npyscreen
+import os
+import sys
import curses
+import struct
+from shutil import get_terminal_size
+
+# -------------------------------------
+def set_terminal_size(columns: int, lines: int):
+    OS = platform.uname().system
+    if OS == "Windows":
+        os.system(f'mode con: cols={columns} lines={lines}')
+    elif OS in ['Darwin', 'Linux']:
+        import termios
+        import fcntl
+        winsize = struct.pack("HHHH", lines, columns, 0, 0)
+        fcntl.ioctl(sys.stdout.fileno(), termios.TIOCSWINSZ, winsize)
+        sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=lines, cols=columns))
+        sys.stdout.flush()
+
+def set_min_terminal_size(min_cols: int, min_lines: int):
+    # make sure there's enough room for the ui
+    term_cols, term_lines = get_terminal_size()
+    cols = max(term_cols, min_cols)
+    lines = max(term_lines, min_lines)
+    set_terminal_size(cols, lines)
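
Note: set_min_terminal_size grows the window to at least the requested dimensions but never shrinks it, because the current size is folded in with max(). A typical call site, mirroring what the configure and model-install scripts now do before launching their npyscreen forms (the constants shown are the values those modules define):

    # Ensure the terminal is big enough before drawing a npyscreen form.
    MIN_COLS, MIN_LINES = 120, 45
    set_min_terminal_size(MIN_COLS, MIN_LINES)
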
class IntSlider(npyscreen.Slider):
def translate_value(self):
@@ -23,7 +49,6 @@ class CenteredTitleText(npyscreen.TitleText):
maxy, maxx = self.parent.curses_pad.getmaxyx()
label = self.name
self.relx = (maxx - len(label)) // 2
-begin_entry_at = -self.relx + 2
# -------------------------------------
class CenteredButtonPress(npyscreen.ButtonPress):

View File

@@ -428,8 +428,6 @@ class ModelManager(object):
torch.cuda.reset_peak_memory_stats()
torch.cuda.empty_cache()
tic = time.time()
-# this does the work
if not os.path.isabs(config):
config = os.path.join(Globals.root, config)
@@ -642,21 +640,18 @@ class ModelManager(object):
models.yaml file.
"""
model_name = model_name or Path(repo_or_path).stem
-description = model_description or f"imported diffusers model {model_name}"
+model_description = model_description or f"Imported diffusers model {model_name}"
new_config = dict(
description=model_description,
vae=vae,
format="diffusers",
)
print(f"DEBUG: here i am 1")
if isinstance(repo_or_path, Path) and repo_or_path.exists():
new_config.update(path=str(repo_or_path))
else:
new_config.update(repo_id=repo_or_path)
print(f"DEBUG: here i am 2")
self.add_model(model_name, new_config, True)
print(f"DEBUG: config = {self.config}")
if commit_to_conf:
self.commit(commit_to_conf)
return model_name
@@ -704,7 +699,7 @@ class ModelManager(object):
model_name or Path(weights).stem
) # note this gives ugly pathnames if used on a URL without a Content-Disposition header
model_description = (
model_description or f"imported stable diffusion weights file {model_name}"
model_description or f"Imported stable diffusion weights file {model_name}"
)
new_config = dict(
weights=str(weights_path),
@@ -840,7 +835,7 @@ class ModelManager(object):
thing, commit_to_conf=commit_to_conf
)
pipeline, _, _, _ = self._load_diffusers_model(self.config[model_name])
return model_name
else:
print(
f"** {thing}: Unknown thing. Please provide a URL, file path, directory or HuggingFace repo_id"

View File

@@ -38,7 +38,7 @@ dependencies = [
"albumentations",
"click",
"clip_anytorch", # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
"compel>=0.1.6",
"compel==0.1.7",
"datasets",
"diffusers[torch]~=0.13",
"dnspython==2.2.1",