Merge branch 'main' into fix/py39-compatibility

Authored by Matthias Wild on 2023-02-23 23:53:38 +01:00; committed by GitHub.
22 changed files with 229 additions and 155 deletions

View File

@@ -496,6 +496,7 @@ def main_loop(gen, opt):
def do_command(command: str, gen, opt: Args, completer) -> tuple:
global infile
operation = "generate" # default operation, alternative is 'postprocess'
+command = command.replace('\\','/') # windows
if command.startswith(
"!dream"
@@ -630,7 +631,6 @@ def import_model(model_path: str, gen, opt, completer, convert=False) -> str:
(3) a huggingface repository id; or (4) a local directory containing a
diffusers model.
"""
-model_path = model_path.replace("\\", "/") # windows
default_name = Path(model_path).stem
model_name = None
model_desc = None
@@ -672,7 +672,7 @@ def import_model(model_path: str, gen, opt, completer, convert=False) -> str:
gen.model_manager.commit(opt.conf)
completer.update_models(gen.model_manager.list_models())
-print(f">> {model_name} successfully installed")
+print(f">> {imported_name} successfully installed")
def _verify_load(model_name: str, gen) -> bool:
print(">> Verifying that new model loads...")

View File

@@ -1 +1 @@
-__version__='2.3.1+a0'
+__version__='2.3.1+rc3'

View File

@@ -8,7 +8,6 @@
#
print("Loading Python libraries...\n")
import argparse
-import curses
import io
import os
import re
@@ -19,6 +18,7 @@ import warnings
from argparse import Namespace
from pathlib import Path
from urllib import request
+from shutil import get_terminal_size
import npyscreen
import torch
@@ -46,7 +46,8 @@ from .model_install_backend import (
recommended_datasets,
hf_download_with_resume,
)
-from .widgets import IntTitleSlider, CenteredButtonPress
+from .widgets import IntTitleSlider, CenteredButtonPress, set_min_terminal_size
warnings.filterwarnings("ignore")
@@ -64,6 +65,10 @@ SD_Configs = Path(global_config_dir()) / "stable-diffusion"
Datasets = OmegaConf.load(Dataset_path)
+# minimum size for the UI
+MIN_COLS = 135
+MIN_LINES = 45
INIT_FILE_PREAMBLE = """# InvokeAI initialization file
# This is the InvokeAI initialization file, which contains command-line default values.
# Feel free to edit. If anything goes wrong, you can re-initialize this file by deleting
@@ -109,8 +114,6 @@ Add the '--help' argument to see all of the command-line switches available for
# ---------------------------------------------
def yes_or_no(prompt: str, default_yes=True):
-completer.set_options(["yes", "no"])
-completer.complete_extensions(None) # turn off path-completion mode
default = "y" if default_yes else "n"
response = input(f"{prompt} [{default}] ") or default
if default_yes:
@@ -162,7 +165,6 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
print(f"Installing {label} model file {model_url}...", end="", file=sys.stderr)
if not os.path.exists(model_dest):
os.makedirs(os.path.dirname(model_dest), exist_ok=True)
-print("", file=sys.stderr)
request.urlretrieve(
model_url, model_dest, ProgressBar(os.path.basename(model_dest))
)
@@ -180,26 +182,22 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
def download_bert():
print(
"Installing bert tokenizer...",
-end="",
-file=sys.stderr,
+file=sys.stderr
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
from transformers import BertTokenizerFast
download_from_hf(BertTokenizerFast, "bert-base-uncased")
-print("...success", file=sys.stderr)
# ---------------------------------------------
def download_clip():
print("Installing CLIP model...", file=sys.stderr)
version = "openai/clip-vit-large-patch14"
-print("Tokenizer...", file=sys.stderr, end="")
+print("Tokenizer...", file=sys.stderr)
download_from_hf(CLIPTokenizer, version)
-print("Text model...", file=sys.stderr, end="")
+print("Text model...", file=sys.stderr)
download_from_hf(CLIPTextModel, version)
-print("...success", file=sys.stderr)
# ---------------------------------------------
@@ -252,7 +250,7 @@ def download_codeformer():
# ---------------------------------------------
def download_clipseg():
-print("Installing clipseg model for text-based masking...", end="", file=sys.stderr)
+print("Installing clipseg model for text-based masking...", file=sys.stderr)
CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
try:
download_from_hf(AutoProcessor, CLIPSEG_MODEL)
@@ -260,7 +258,6 @@ def download_clipseg():
except Exception:
print("Error installing clipseg model:")
print(traceback.format_exc())
-print("...success", file=sys.stderr)
# -------------------------------------
@@ -276,15 +273,14 @@ def download_safety_checker():
print(traceback.format_exc())
return
safety_model_id = "CompVis/stable-diffusion-safety-checker"
-print("AutoFeatureExtractor...", end="", file=sys.stderr)
+print("AutoFeatureExtractor...", file=sys.stderr)
download_from_hf(AutoFeatureExtractor, safety_model_id)
-print("StableDiffusionSafetyChecker...", end="", file=sys.stderr)
+print("StableDiffusionSafetyChecker...", file=sys.stderr)
download_from_hf(StableDiffusionSafetyChecker, safety_model_id)
-print("...success", file=sys.stderr)
# -------------------------------------
-def download_vaes(precision: str):
+def download_vaes():
print("Installing stabilityai VAE...", file=sys.stderr)
try:
# first the diffusers version
@@ -292,8 +288,6 @@ def download_vaes(precision: str):
args = dict(
cache_dir=global_cache_dir("diffusers"),
)
-if precision == "float16":
-args.update(torch_dtype=torch.float16, revision="fp16")
if not AutoencoderKL.from_pretrained(repo_id, **args):
raise Exception(f"download of {repo_id} failed")
@@ -306,7 +300,6 @@
model_dir=str(Globals.root / Model_dir / Weights_dir),
):
raise Exception(f"download of {model_name} failed")
-print("...downloaded successfully", file=sys.stderr)
except Exception as e:
print(f"Error downloading StabilityAI standard VAE: {str(e)}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
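
Note: with the precision parameter gone, the fp16 revision is no longer
requested and the VAE is always downloaded at full precision. A hedged sketch
of the resulting call; the repo id below is an assumption standing in for the
elided repo_id above, and the cache path for global_cache_dir("diffusers"):

from diffusers import AutoencoderKL
# no torch_dtype/revision kwargs, so full-precision weights are fetched
vae = AutoencoderKL.from_pretrained(
    "stabilityai/sd-vae-ft-mse",               # assumed repo_id
    cache_dir="/path/to/invokeai/models/diffusers",  # assumed path
)
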
@@ -332,8 +325,7 @@ class editOptsForm(npyscreen.FormMultiPage):
old_opts = self.parentApp.invokeai_opts
first_time = not (Globals.root / Globals.initfile).exists()
access_token = HfFolder.get_token()
-window_height, window_width = curses.initscr().getmaxyx()
+window_width,window_height = get_terminal_size()
for i in [
"Configure startup settings. You can come back and change these later.",
"Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields.",
@@ -676,6 +668,8 @@ def run_console_ui(
) -> (Namespace, Namespace):
# parse_args() will read from init file if present
invokeai_opts = default_startup_options(initfile)
+set_min_terminal_size(MIN_COLS, MIN_LINES)
editApp = EditOptApplication(program_opts, invokeai_opts)
editApp.run()
if editApp.user_cancelled:
@@ -683,7 +677,6 @@
else:
return (editApp.new_opts, editApp.user_selections)
# -------------------------------------
def write_opts(opts: Namespace, init_file: Path):
"""
@@ -703,6 +696,9 @@ def write_opts(opts: Namespace, init_file: Path):
args_to_skip = re.compile(
"^--?(o|out|no-xformer|xformer|no-ckpt|ckpt|free|no-nsfw|nsfw|prec|max_load|embed|always|ckpt|free_gpu)"
)
+# fix windows paths
+opts.outdir = opts.outdir.replace('\\','/')
+opts.embedding_path = opts.embedding_path.replace('\\','/')
new_file = f"{init_file}.new"
try:
lines = [x.strip() for x in open(init_file, "r").readlines()]
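
Note: args_to_skip matches the one-shot switches that should not persist when
the init file is rewritten. A quick check of the pattern's behaviour:

import re
args_to_skip = re.compile(
    "^--?(o|out|no-xformer|xformer|no-ckpt|ckpt|free|no-nsfw|nsfw|prec|max_load|embed|always|ckpt|free_gpu)"
)
assert args_to_skip.match("--outdir")       # dropped from the new init file
assert not args_to_skip.match("--sampler")  # retained
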
@@ -842,7 +838,7 @@ def main():
download_codeformer()
download_clipseg()
download_safety_checker()
-download_vaes(init_options.precision)
+download_vaes()
if opt.skip_sd_weights:
print("\n** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **")
@@ -853,10 +849,6 @@ def main():
postscript(errors=errors)
except KeyboardInterrupt:
print("\nGoodbye! Come back soon.")
-except Exception as e:
-print(f'\nA problem occurred during initialization.\nThe error was: "{str(e)}"')
-print(traceback.format_exc())
# -------------------------------------
if __name__ == "__main__":

View File

@@ -2,18 +2,16 @@
Minimalist updater script. Prompts user for the tag or branch to update to and runs
pip install <path_to_git_source>.
'''
+import os
import platform
import requests
-import subprocess
from rich import box, print
-from rich.console import Console, group
+from rich.console import Console, Group, group
from rich.panel import Panel
from rich.prompt import Prompt
from rich.style import Style
from rich.syntax import Syntax
from rich.text import Text
-from rich.live import Live
-from rich.table import Table
from ldm.invoke import __version__
@@ -23,19 +21,17 @@ INVOKE_AI_REL="https://api.github.com/repos/invoke-ai/InvokeAI/releases"
OS = platform.uname().system
ARCH = platform.uname().machine
-ORANGE_ON_DARK_GREY = Style(bgcolor="grey23", color="orange1")
-console = Console(style=Style(color="grey74", bgcolor="grey19"))
+if OS == "Windows":
+# Windows terminals look better without a background colour
+console = Console(style=Style(color="grey74"))
+else:
+console = Console(style=Style(color="grey74", bgcolor="grey23"))
def get_versions()->dict:
return requests.get(url=INVOKE_AI_REL).json()
def welcome(versions: dict):
@group()
def text():
yield f'InvokeAI Version: [bold yellow]{__version__}'
@@ -48,55 +44,45 @@ def welcome(versions: dict):
[3] Manually enter the tag or branch name you wish to update'''
console.rule()
-console.print(
+print(
Panel(
title="[bold wheat1]InvokeAI Updater",
renderable=text(),
box=box.DOUBLE,
expand=True,
padding=(1, 2),
-style=ORANGE_ON_DARK_GREY,
+style=Style(bgcolor="grey23", color="orange1"),
subtitle=f"[bold grey39]{OS}-{ARCH}",
)
)
-# console.rule is used instead of console.line to maintain dark background
-# on terminals where light background is the default
-console.rule(characters=" ")
+console.line()
def main():
versions = get_versions()
welcome(versions)
tag = None
-choice = Prompt.ask(Text.from_markup(('[grey74 on grey23]Choice:')),choices=['1','2','3'],default='1')
+choice = Prompt.ask('Choice:',choices=['1','2','3'],default='1')
if choice=='1':
tag = versions[0]['tag_name']
elif choice=='2':
tag = 'main'
elif choice=='3':
-tag = Prompt.ask('[grey74 on grey23]Enter an InvokeAI tag or branch name')
-console.print(Panel(f':crossed_fingers: Upgrading to [yellow]{tag}[/yellow]', box=box.MINIMAL, style=ORANGE_ON_DARK_GREY))
+tag = Prompt.ask('Enter an InvokeAI tag or branch name')
+print(f':crossed_fingers: Upgrading to [yellow]{tag}[/yellow]')
cmd = f'pip install {INVOKE_AI_SRC}/{tag}.zip --use-pep517'
-progress = Table.grid(expand=True)
-progress_panel = Panel(progress, box=box.MINIMAL, style=ORANGE_ON_DARK_GREY)
-with subprocess.Popen(['bash', '-c', cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc:
-progress.add_column()
-with Live(progress_panel, console=console, vertical_overflow='visible'):
-while proc.poll() is None:
-for l in iter(proc.stdout.readline, b''):
-progress.add_row(l.decode().strip(), style=ORANGE_ON_DARK_GREY)
-if proc.returncode == 0:
-console.rule(f':heavy_check_mark: Upgrade successful')
-else:
-console.rule(f':exclamation: [bold red]Upgrade failed[/red bold]')
+print('')
+print('')
+if os.system(cmd)==0:
+print(f':heavy_check_mark: Upgrade successful')
+else:
+print(f':exclamation: [bold red]Upgrade failed[/red bold]')
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
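
Note: the rich Live/subprocess streaming above is replaced by a plain
os.system call. os.system returns the command's exit status, so comparing
against zero is all the success check needs:

import os
# sketch: zero exit status means the command succeeded
if os.system("pip --version") == 0:
    print("pip is available")
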

View File

@@ -10,10 +10,8 @@ The work is actually done in backend code in model_install_backend.py.
"""
import argparse
-import curses
import os
import sys
-import traceback
from argparse import Namespace
from pathlib import Path
from typing import List
@@ -22,15 +20,23 @@ import npyscreen
import torch
from npyscreen import widget
from omegaconf import OmegaConf
+from shutil import get_terminal_size
from ..devices import choose_precision, choose_torch_device
from ..globals import Globals, global_config_dir
from .model_install_backend import (Dataset_path, default_config_file,
default_dataset, get_root,
install_requested_models,
-recommended_datasets)
+recommended_datasets,
+)
from .widgets import (MultiSelectColumns, TextBox,
-OffsetButtonPress, CenteredTitleText)
+OffsetButtonPress, CenteredTitleText,
+set_min_terminal_size,
+)
+# minimum size for the UI
+MIN_COLS = 120
+MIN_LINES = 45
class addModelsForm(npyscreen.FormMultiPage):
# for responsive resizing - disabled
@@ -50,7 +56,7 @@ class addModelsForm(npyscreen.FormMultiPage):
super().__init__(parentApp=parentApp, name=name, *args, **keywords)
def create(self):
-window_height, window_width = curses.initscr().getmaxyx()
+window_width, window_height = get_terminal_size()
starter_model_labels = self._get_starter_model_labels()
recommended_models = [
x
@@ -249,7 +255,7 @@ class addModelsForm(npyscreen.FormMultiPage):
)
def _get_starter_model_labels(self) -> List[str]:
-window_height, window_width = curses.initscr().getmaxyx()
+window_width, window_height = get_terminal_size()
label_width = 25
checkbox_width = 4
spacing_width = 2
@@ -268,7 +274,7 @@ class addModelsForm(npyscreen.FormMultiPage):
]
def _get_columns(self) -> int:
-window_height, window_width = curses.initscr().getmaxyx()
+window_width, window_height = get_terminal_size()
cols = (
4
if window_width > 240
@@ -362,7 +368,6 @@ class AddModelApplication(npyscreen.NPSAppManaged):
"MAIN", addModelsForm, name="Install Stable Diffusion Models"
)
# --------------------------------------------------------
def process_and_execute(opt: Namespace, selections: Namespace):
models_to_remove = [
@@ -409,6 +414,7 @@ def select_and_download_models(opt: Namespace):
precision=precision,
)
else:
+set_min_terminal_size(MIN_COLS, MIN_LINES)
installApp = AddModelApplication()
installApp.run()
@@ -475,7 +481,7 @@ def main():
sys.exit(-1)
except KeyboardInterrupt:
print("\nGoodbye! Come back soon.")
-except (widget.NotEnoughSpaceForWidget, Exception) as e:
+except widget.NotEnoughSpaceForWidget as e:
if str(e).startswith("Height of 1 allocated"):
print(
"** Insufficient vertical space for the interface. Please make your window taller and try again"
@@ -484,11 +490,6 @@
print(
"** Insufficient horizontal space for the interface. Please make your window wider and try again."
)
-else:
-print(f"** An error has occurred: {str(e)}")
-traceback.print_exc()
-sys.exit(-1)
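
Note: the old clause caught (widget.NotEnoughSpaceForWidget, Exception),
which is effectively "except Exception" and reported unrelated bugs as
window-size problems; the new clause catches only the widget error. An
illustration of why the tuple form was too broad:

# a tuple containing Exception matches nearly every error
try:
    raise RuntimeError("some unrelated bug")
except (ValueError, Exception) as e:
    print(f"masked as a UI problem: {e}")
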
# -------------------------------------
if __name__ == "__main__":

View File

@@ -67,6 +67,9 @@ def install_requested_models(
purge_deleted: bool = False,
config_file_path: Path = None,
):
+'''
+Entry point for installing/deleting starter models, or installing external models.
+'''
config_file_path=config_file_path or default_config_file()
if not config_file_path.exists():
open(config_file_path,'w')
@@ -117,14 +120,15 @@ def install_requested_models(
argument = '--autoconvert' if convert_to_diffusers else '--autoimport'
initfile = Path(Globals.root, Globals.initfile)
replacement = Path(Globals.root, f'{Globals.initfile}.new')
+directory = str(scan_directory).replace('\\','/')
with open(initfile,'r') as input:
with open(replacement,'w') as output:
while line := input.readline():
if not line.startswith(argument):
output.writelines([line])
-output.writelines([f'{argument} {str(scan_directory)}'])
+output.writelines([f'{argument} {directory}'])
os.replace(replacement,initfile)
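
Note: writing to a ".new" file and then calling os.replace swaps the init
file in one step (atomic on POSIX, overwriting on Windows), so an interrupted
run cannot leave a half-written file behind. The pattern in isolation, with
hypothetical content:

import os
with open("invokeai.init.new", "w") as f:
    f.write("--autoimport /path/to/models\n")  # hypothetical content
os.replace("invokeai.init.new", "invokeai.init")
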
# -------------------------------------
def yes_or_no(prompt: str, default_yes=True):
default = "y" if default_yes else "n"
@@ -231,7 +235,6 @@ def _download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path:
def download_from_hf(
model_class: object, model_name: str, cache_subdir: Path = Path("hub"), **kwargs
):
-print("", file=sys.stderr) # to prevent tqdm from overwriting
path = global_cache_dir(cache_subdir)
model = model_class.from_pretrained(
model_name,

View File

@@ -2,8 +2,34 @@
Widget class definitions used by model_select.py, merge_diffusers.py and textual_inversion.py
'''
import math
+import platform
import npyscreen
+import os
+import sys
import curses
+import struct
+from shutil import get_terminal_size
+# -------------------------------------
+def set_terminal_size(columns: int, lines: int):
+OS = platform.uname().system
+if OS=="Windows":
+os.system(f'mode con: cols={columns} lines={lines}')
+elif OS in ['Darwin', 'Linux']:
+import termios
+import fcntl
+winsize = struct.pack("HHHH", lines, columns, 0, 0)
+fcntl.ioctl(sys.stdout.fileno(), termios.TIOCSWINSZ, winsize)
+sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=lines, cols=columns))
+sys.stdout.flush()
+def set_min_terminal_size(min_cols: int, min_lines: int):
+# make sure there's enough room for the ui
+term_cols, term_lines = get_terminal_size()
+cols = max(term_cols, min_cols)
+lines = max(term_lines, min_lines)
+set_terminal_size(cols,lines)
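
Note: set_min_terminal_size only ever grows the window. A usage sketch; it is
meant to be called before npyscreen builds its forms, so the first layout
pass already sees the enlarged terminal:

# values are the module-level minimums defined elsewhere in this diff
set_min_terminal_size(120, 45)  # no-op if the window is already bigger
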
class IntSlider(npyscreen.Slider):
def translate_value(self):
@@ -23,7 +49,6 @@ class CenteredTitleText(npyscreen.TitleText):
maxy, maxx = self.parent.curses_pad.getmaxyx()
label = self.name
self.relx = (maxx - len(label)) // 2
-begin_entry_at = -self.relx + 2
# -------------------------------------
class CenteredButtonPress(npyscreen.ButtonPress):

View File

@@ -19,15 +19,7 @@ from typing import Union
Globals = Namespace()
# This is usually overwritten by the command line and/or environment variables
-if os.environ.get('INVOKEAI_ROOT'):
-Globals.root = osp.abspath(os.environ.get('INVOKEAI_ROOT'))
-elif os.environ.get('VIRTUAL_ENV'):
-Globals.root = osp.abspath(osp.join(os.environ.get('VIRTUAL_ENV'), '..'))
-else:
-Globals.root = osp.abspath(osp.expanduser('~/invokeai'))
-# Where to look for the initialization file
+# Where to look for the initialization file and other key components
Globals.initfile = 'invokeai.init'
Globals.models_file = 'models.yaml'
Globals.models_dir = 'models'
@@ -35,6 +27,20 @@ Globals.config_dir = 'configs'
Globals.autoscan_dir = 'weights'
Globals.converted_ckpts_dir = 'converted_ckpts'
+# Set the default root directory. This can be overwritten by explicitly
+# passing the `--root <directory>` argument on the command line.
+# logic is:
+# 1) use INVOKEAI_ROOT environment variable (no check for this being a valid directory)
+# 2) use VIRTUAL_ENV environment variable, with a check for initfile being there
+# 3) use ~/invokeai
+if os.environ.get('INVOKEAI_ROOT'):
+Globals.root = osp.abspath(os.environ.get('INVOKEAI_ROOT'))
+elif os.environ.get('VIRTUAL_ENV') and Path(os.environ.get('VIRTUAL_ENV'),'..',Globals.initfile).exists():
+Globals.root = osp.abspath(osp.join(os.environ.get('VIRTUAL_ENV'), '..'))
+else:
+Globals.root = osp.abspath(osp.expanduser('~/invokeai'))
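
Note: an illustration of the resolution order with hypothetical paths:

# 1) INVOKEAI_ROOT=/data/invokeai          -> Globals.root == '/data/invokeai'
# 2) VIRTUAL_ENV=/apps/inv/.venv and
#    /apps/inv/invokeai.init exists        -> Globals.root == '/apps/inv'
# 3) neither set                           -> expanded '~/invokeai'
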
# Try loading patchmatch
Globals.try_patchmatch = True

View File

@@ -428,8 +428,6 @@ class ModelManager(object):
torch.cuda.reset_peak_memory_stats()
torch.cuda.empty_cache()
tic = time.time()
-# this does the work
if not os.path.isabs(config):
config = os.path.join(Globals.root, config)
@@ -642,21 +640,18 @@ class ModelManager(object):
models.yaml file.
"""
model_name = model_name or Path(repo_or_path).stem
-description = model_description or f"imported diffusers model {model_name}"
+model_description = model_description or f"Imported diffusers model {model_name}"
new_config = dict(
description=model_description,
vae=vae,
format="diffusers",
)
-print(f"DEBUG: here i am 1")
if isinstance(repo_or_path, Path) and repo_or_path.exists():
new_config.update(path=str(repo_or_path))
else:
new_config.update(repo_id=repo_or_path)
-print(f"DEBUG: here i am 2")
self.add_model(model_name, new_config, True)
-print(f"DEBUG: config = {self.config}")
if commit_to_conf:
self.commit(commit_to_conf)
return model_name
@@ -704,7 +699,7 @@ class ModelManager(object):
model_name or Path(weights).stem
) # note this gives ugly pathnames if used on a URL without a Content-Disposition header
model_description = (
-model_description or f"imported stable diffusion weights file {model_name}"
+model_description or f"Imported stable diffusion weights file {model_name}"
)
new_config = dict(
weights=str(weights_path),
@@ -840,7 +835,7 @@ class ModelManager(object):
thing, commit_to_conf=commit_to_conf
)
pipeline, _, _, _ = self._load_diffusers_model(self.config[model_name])
return model_name
else:
print(
f"** {thing}: Unknown thing. Please provide a URL, file path, directory or HuggingFace repo_id"