Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00
Bugfix/windows install (#2770)
# This will constitute v2.3.1+rc2

## Windows installer enhancements

1. Resize the installer window to give more room for the configure and download forms.
2. Replace '\' with '/' in directory names so that users can drag-and-drop folders into the dialogue boxes that accept directories (see the sketch after this list).
3. Make a similar change in the CLI for the !import_model and !convert_model commands.
4. Report errors more helpfully when a model download fails due to network errors.
5. Put the launcher scripts into a loop so that the menu reappears after invokeai, the merge script, etc. exits. The user can quit with "Q".
6. Do not try to download the fp16 variant of sd-vae-ft-mse, since it doesn't exist.
7. Clean up status reporting when installing models.
8. Detect when the install failed for some reason and print a helpful error message rather than a stack trace.
9. Detect the window size and resize it to minimum acceptable values to better display the configure and install forms.
10. Fix a bug in the CLI which prevented diffusers imported by their repo_ids from being correctly registered in the current session (though they install correctly).
11. Capitalize the "i" in "Imported" in the autogenerated descriptions.
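Items 2 and 3 come down to a single normalization applied wherever a directory or file path is accepted. A minimal sketch of the idea in isolation (the helper name is hypothetical; the PR inlines the `.replace()` call at each input site):

```python
# Sketch of the path normalization behind items 2 and 3. The helper name
# is hypothetical; the PR inlines the .replace() call at each input site.
def normalize_dragged_path(raw: str) -> str:
    # A folder dragged into a Windows console arrives with backslashes,
    # e.g. C:\Users\me\models. Forward slashes are accepted by both the
    # command parser and pathlib, so normalize unconditionally.
    return raw.replace("\\", "/")

print(normalize_dragged_path(r"C:\Users\me\models"))  # -> C:/Users/me/models
```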
This commit is contained in commit 2c9b29725b.
```diff
@@ -20,10 +20,9 @@ echo Building installer for version $VERSION
 echo "Be certain that you're in the 'installer' directory before continuing."
 read -p "Press any key to continue, or CTRL-C to exit..."
 
-read -e -p "Commit and tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
+read -e -p "Tag this repo with '${VERSION}' and '${LATEST_TAG}'? [n]: " input
 RESPONSE=${input:='n'}
 if [ "$RESPONSE" == 'y' ]; then
-    git commit -a
 
     if ! git tag $VERSION ; then
         echo "Existing/invalid tag"
@@ -32,6 +31,8 @@ if [ "$RESPONSE" == 'y' ]; then
 
     git push origin :refs/tags/$LATEST_TAG
     git tag -fa $LATEST_TAG
+
+    echo "remember to push --tags!"
 fi
 
 # ----------------------
@@ -337,6 +337,7 @@ class InvokeAiInstance:
                 new_argv.append(el)
         sys.argv = new_argv
 
+        import requests  # to catch download exceptions
         from messages import introduction
 
         introduction()
@@ -346,7 +347,21 @@ class InvokeAiInstance:
         # NOTE: currently the config script does its own arg parsing! this means the command-line switches
         # from the installer will also automatically propagate down to the config script.
         # this may change in the future with config refactoring!
+        succeeded = False
+        try:
             invokeai_configure.main()
+            succeeded = True
+        except requests.exceptions.ConnectionError as e:
+            print(f'\nA network error was encountered during configuration and download: {str(e)}')
+        except OSError as e:
+            print(f'\nAn OS error was encountered during configuration and download: {str(e)}')
+        except Exception as e:
+            print(f'\nA problem was encountered during the configuration and download steps: {str(e)}')
+        finally:
+            if not succeeded:
+                print('To try again, find the "invokeai" directory, run the script "invoke.sh" or "invoke.bat"')
+                print('and choose option 7 to fix a broken install, optionally followed by option 5 to install models.')
+                print('Alternatively you can relaunch the installer.')
 
     def install_user_scripts(self):
         """
```
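The try/except block above is also why the installer gains `import requests`: the Hugging Face download machinery uses `requests` under the hood, so a dropped connection surfaces as `requests.exceptions.ConnectionError` rather than a builtin error (a reading inferred from the "# to catch download exceptions" comment, not stated in the PR). The succeeded-flag pattern in isolation, as a hedged sketch with illustrative names:

```python
# Hedged sketch of the succeeded-flag pattern used above; the function
# name and message text here are illustrative, not the installer's own.
import requests

def run_with_recovery_hint(step) -> bool:
    succeeded = False
    try:
        step()                     # e.g. invokeai_configure.main()
        succeeded = True
    except requests.exceptions.ConnectionError as e:
        print(f"\nA network error was encountered: {e}")
    except Exception as e:
        print(f"\nA problem was encountered: {e}")
    finally:
        if not succeeded:          # runs whether or not an exception was caught
            print("Re-run the launcher and pick the repair option to try again.")
    return succeeded
```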
```diff
@@ -6,8 +6,9 @@ setlocal
 call .venv\Scripts\activate.bat
 set INVOKEAI_ROOT=.
 
+:start
 echo Do you want to generate images using the
-echo 1. command-line
+echo 1. command-line interface
 echo 2. browser-based UI
 echo 3. run textual inversion training
 echo 4. merge models (diffusers type only)
@@ -17,7 +18,8 @@ echo 7. re-run the configure script to fix a broken install
 echo 8. open the developer console
 echo 9. update InvokeAI
 echo 10. command-line help
-set /P restore="Please enter 1-10: [2] "
+echo Q - quit
+set /P restore="Please enter 1-10, Q: [2] "
 if not defined restore set restore=2
 IF /I "%restore%" == "1" (
     echo Starting the InvokeAI command-line..
@@ -60,9 +62,19 @@ IF /I "%restore%" == "1" (
     python .venv\Scripts\invokeai.exe --help %*
     pause
     exit /b
+) ELSE IF /I "%restore%" == "q" (
+    echo Goodbye!
+    goto ending
 ) ELSE (
     echo Invalid selection
     pause
     exit /b
 )
+goto start
 
 endlocal
+pause
 
+:ending
+exit /b
@@ -24,9 +24,11 @@ if [ "$(uname -s)" == "Darwin" ]; then
     export PYTORCH_ENABLE_MPS_FALLBACK=1
 fi
 
+while true
+do
 if [ "$0" != "bash" ]; then
     echo "Do you want to generate images using the"
-    echo "1. command-line"
+    echo "1. command-line interface"
     echo "2. browser-based UI"
     echo "3. run textual inversion training"
     echo "4. merge models (diffusers type only)"
@@ -35,35 +37,36 @@ if [ "$0" != "bash" ]; then
     echo "7. re-run the configure script to fix a broken install"
     echo "8. open the developer console"
     echo "9. update InvokeAI"
-    echo "10. command-line help "
+    echo "10. command-line help"
+    echo "Q - Quit"
     echo ""
-    read -p "Please enter 1-10: [2] " yn
+    read -p "Please enter 1-10, Q: [2] " yn
     choice=${yn:='2'}
     case $choice in
         1)
             echo "Starting the InvokeAI command-line..."
-            exec invokeai $@
+            invokeai $@
             ;;
         2)
             echo "Starting the InvokeAI browser-based UI..."
-            exec invokeai --web $@
+            invokeai --web $@
             ;;
         3)
             echo "Starting Textual Inversion:"
-            exec invokeai-ti --gui $@
+            invokeai-ti --gui $@
            ;;
         4)
             echo "Merging Models:"
-            exec invokeai-merge --gui $@
+            invokeai-merge --gui $@
             ;;
         5)
-            exec invokeai-model-install --root ${INVOKEAI_ROOT}
+            invokeai-model-install --root ${INVOKEAI_ROOT}
             ;;
         6)
-            exec invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
+            invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
             ;;
         7)
-            exec invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
+            invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
             ;;
         8)
             echo "Developer Console:"
@@ -72,10 +75,13 @@ if [ "$0" != "bash" ]; then
             ;;
         9)
             echo "Update:"
-            exec invokeai-update
+            invokeai-update
             ;;
         10)
-            exec invokeai --help
+            invokeai --help
+            ;;
+        [qQ])
+            exit 0
             ;;
         *)
             echo "Invalid selection"
@@ -86,3 +92,4 @@ else # in developer console
     echo "Press ^D to exit"
     export PS1="(InvokeAI) \u@\h \w> "
 fi
+done
```
```diff
@@ -496,6 +496,7 @@ def main_loop(gen, opt):
 def do_command(command: str, gen, opt: Args, completer) -> tuple:
     global infile
     operation = "generate"  # default operation, alternative is 'postprocess'
+    command = command.replace('\\','/')  # windows
 
     if command.startswith(
         "!dream"
@@ -630,7 +631,6 @@ def import_model(model_path: str, gen, opt, completer, convert=False) -> str:
     (3) a huggingface repository id; or (4) a local directory containing a
     diffusers model.
     """
-    model_path = model_path.replace("\\", "/")  # windows
     default_name = Path(model_path).stem
     model_name = None
     model_desc = None
@@ -672,7 +672,7 @@ def import_model(model_path: str, gen, opt, completer, convert=False) -> str:
 
     gen.model_manager.commit(opt.conf)
     completer.update_models(gen.model_manager.list_models())
-    print(f">> {model_name} successfully installed")
+    print(f">> {imported_name} successfully installed")
 
 def _verify_load(model_name: str, gen) -> bool:
     print(">> Verifying that new model loads...")
@@ -1 +1 @@
-__version__='2.3.1+rc1'
+__version__='2.3.1+rc3'
@@ -8,7 +8,6 @@
 #
 print("Loading Python libraries...\n")
 import argparse
-import curses
 import io
 import os
 import re
@@ -19,6 +18,7 @@ import warnings
 from argparse import Namespace
 from pathlib import Path
 from urllib import request
+from shutil import get_terminal_size
 
 import npyscreen
 import torch
@@ -46,7 +46,8 @@ from .model_install_backend import (
     recommended_datasets,
     hf_download_with_resume,
 )
-from .widgets import IntTitleSlider, CenteredButtonPress
+from .widgets import IntTitleSlider, CenteredButtonPress, set_min_terminal_size
+
 
 warnings.filterwarnings("ignore")
 
@@ -64,6 +65,10 @@ SD_Configs = Path(global_config_dir()) / "stable-diffusion"
 
 Datasets = OmegaConf.load(Dataset_path)
 
+# minimum size for the UI
+MIN_COLS = 135
+MIN_LINES = 45
+
 INIT_FILE_PREAMBLE = """# InvokeAI initialization file
 # This is the InvokeAI initialization file, which contains command-line default values.
 # Feel free to edit. If anything goes wrong, you can re-initialize this file by deleting
@@ -109,8 +114,6 @@ Add the '--help' argument to see all of the command-line switches available for
 
 # ---------------------------------------------
 def yes_or_no(prompt: str, default_yes=True):
-    completer.set_options(["yes", "no"])
-    completer.complete_extensions(None)  # turn off path-completion mode
     default = "y" if default_yes else "n"
     response = input(f"{prompt} [{default}] ") or default
     if default_yes:
@@ -162,7 +165,6 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
     print(f"Installing {label} model file {model_url}...", end="", file=sys.stderr)
     if not os.path.exists(model_dest):
         os.makedirs(os.path.dirname(model_dest), exist_ok=True)
-        print("", file=sys.stderr)
         request.urlretrieve(
             model_url, model_dest, ProgressBar(os.path.basename(model_dest))
         )
@@ -180,26 +182,22 @@ def download_with_progress_bar(model_url: str, model_dest: str, label: str = "th
 def download_bert():
     print(
         "Installing bert tokenizer...",
-        end="",
-        file=sys.stderr,
+        file=sys.stderr
     )
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore", category=DeprecationWarning)
         from transformers import BertTokenizerFast
 
         download_from_hf(BertTokenizerFast, "bert-base-uncased")
-        print("...success", file=sys.stderr)
 
 
 # ---------------------------------------------
 def download_clip():
     print("Installing CLIP model...", file=sys.stderr)
     version = "openai/clip-vit-large-patch14"
-    print("Tokenizer...", file=sys.stderr, end="")
+    print("Tokenizer...", file=sys.stderr)
     download_from_hf(CLIPTokenizer, version)
-    print("Text model...", file=sys.stderr, end="")
+    print("Text model...", file=sys.stderr)
     download_from_hf(CLIPTextModel, version)
-    print("...success", file=sys.stderr)
 
 
 # ---------------------------------------------
@@ -252,7 +250,7 @@ def download_codeformer():
 
 # ---------------------------------------------
 def download_clipseg():
-    print("Installing clipseg model for text-based masking...", end="", file=sys.stderr)
+    print("Installing clipseg model for text-based masking...", file=sys.stderr)
     CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
     try:
         download_from_hf(AutoProcessor, CLIPSEG_MODEL)
@@ -260,7 +258,6 @@ def download_clipseg():
     except Exception:
         print("Error installing clipseg model:")
         print(traceback.format_exc())
-    print("...success", file=sys.stderr)
 
 
 # -------------------------------------
@@ -276,15 +273,14 @@ def download_safety_checker():
         print(traceback.format_exc())
         return
     safety_model_id = "CompVis/stable-diffusion-safety-checker"
-    print("AutoFeatureExtractor...", end="", file=sys.stderr)
+    print("AutoFeatureExtractor...", file=sys.stderr)
     download_from_hf(AutoFeatureExtractor, safety_model_id)
-    print("StableDiffusionSafetyChecker...", end="", file=sys.stderr)
+    print("StableDiffusionSafetyChecker...", file=sys.stderr)
     download_from_hf(StableDiffusionSafetyChecker, safety_model_id)
-    print("...success", file=sys.stderr)
 
 
 # -------------------------------------
-def download_vaes(precision: str):
+def download_vaes():
     print("Installing stabilityai VAE...", file=sys.stderr)
     try:
         # first the diffusers version
@@ -292,8 +288,6 @@ def download_vaes(precision: str):
         args = dict(
             cache_dir=global_cache_dir("diffusers"),
         )
-        if precision == "float16":
-            args.update(torch_dtype=torch.float16, revision="fp16")
         if not AutoencoderKL.from_pretrained(repo_id, **args):
             raise Exception(f"download of {repo_id} failed")
 
@@ -306,7 +300,6 @@ def download_vaes(precision: str):
             model_dir=str(Globals.root / Model_dir / Weights_dir),
         ):
             raise Exception(f"download of {model_name} failed")
-        print("...downloaded successfully", file=sys.stderr)
     except Exception as e:
         print(f"Error downloading StabilityAI standard VAE: {str(e)}", file=sys.stderr)
         print(traceback.format_exc(), file=sys.stderr)
@@ -332,8 +325,7 @@ class editOptsForm(npyscreen.FormMultiPage):
         old_opts = self.parentApp.invokeai_opts
         first_time = not (Globals.root / Globals.initfile).exists()
         access_token = HfFolder.get_token()
-        window_height, window_width = curses.initscr().getmaxyx()
+        window_width,window_height = get_terminal_size()
         for i in [
             "Configure startup settings. You can come back and change these later.",
             "Use ctrl-N and ctrl-P to move to the <N>ext and <P>revious fields.",
@@ -676,6 +668,8 @@ def run_console_ui(
 ) -> (Namespace, Namespace):
     # parse_args() will read from init file if present
     invokeai_opts = default_startup_options(initfile)
+
+    set_min_terminal_size(MIN_COLS, MIN_LINES)
     editApp = EditOptApplication(program_opts, invokeai_opts)
     editApp.run()
     if editApp.user_cancelled:
@@ -683,7 +677,6 @@ def run_console_ui(
     else:
         return (editApp.new_opts, editApp.user_selections)
-
 
 # -------------------------------------
 def write_opts(opts: Namespace, init_file: Path):
     """
@@ -703,6 +696,9 @@ def write_opts(opts: Namespace, init_file: Path):
     args_to_skip = re.compile(
         "^--?(o|out|no-xformer|xformer|no-ckpt|ckpt|free|no-nsfw|nsfw|prec|max_load|embed|always|ckpt|free_gpu)"
     )
+    # fix windows paths
+    opts.outdir = opts.outdir.replace('\\','/')
+    opts.embedding_path = opts.embedding_path.replace('\\','/')
     new_file = f"{init_file}.new"
     try:
         lines = [x.strip() for x in open(init_file, "r").readlines()]
@@ -842,7 +838,7 @@ def main():
         download_codeformer()
         download_clipseg()
         download_safety_checker()
-        download_vaes(init_options.precision)
+        download_vaes()
 
         if opt.skip_sd_weights:
             print("\n** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **")
@@ -853,10 +849,6 @@ def main():
         postscript(errors=errors)
     except KeyboardInterrupt:
         print("\nGoodbye! Come back soon.")
-    except Exception as e:
-        print(f'\nA problem occurred during initialization.\nThe error was: "{str(e)}"')
-        print(traceback.format_exc())
-
 
 # -------------------------------------
 if __name__ == "__main__":
@@ -10,10 +10,8 @@ The work is actually done in backend code in model_install_backend.py.
 """
 
 import argparse
-import curses
 import os
 import sys
-import traceback
 from argparse import Namespace
 from pathlib import Path
 from typing import List
@@ -22,15 +20,23 @@ import npyscreen
 import torch
 from npyscreen import widget
 from omegaconf import OmegaConf
+from shutil import get_terminal_size
 
 from ..devices import choose_precision, choose_torch_device
 from ..globals import Globals, global_config_dir
 from .model_install_backend import (Dataset_path, default_config_file,
                                     default_dataset, get_root,
                                     install_requested_models,
-                                    recommended_datasets)
+                                    recommended_datasets,
+                                    )
 from .widgets import (MultiSelectColumns, TextBox,
-                      OffsetButtonPress, CenteredTitleText)
+                      OffsetButtonPress, CenteredTitleText,
+                      set_min_terminal_size,
+                      )
 
+# minimum size for the UI
+MIN_COLS = 120
+MIN_LINES = 45
+
 class addModelsForm(npyscreen.FormMultiPage):
     # for responsive resizing - disabled
@@ -50,7 +56,7 @@ class addModelsForm(npyscreen.FormMultiPage):
         super().__init__(parentApp=parentApp, name=name, *args, **keywords)
 
     def create(self):
-        window_height, window_width = curses.initscr().getmaxyx()
+        window_width, window_height = get_terminal_size()
         starter_model_labels = self._get_starter_model_labels()
         recommended_models = [
             x
@@ -249,7 +255,7 @@ class addModelsForm(npyscreen.FormMultiPage):
         )
 
     def _get_starter_model_labels(self) -> List[str]:
-        window_height, window_width = curses.initscr().getmaxyx()
+        window_width, window_height = get_terminal_size()
         label_width = 25
         checkbox_width = 4
         spacing_width = 2
@@ -268,7 +274,7 @@ class addModelsForm(npyscreen.FormMultiPage):
         ]
 
     def _get_columns(self) -> int:
-        window_height, window_width = curses.initscr().getmaxyx()
+        window_width, window_height = get_terminal_size()
         cols = (
             4
             if window_width > 240
@@ -362,7 +368,6 @@ class AddModelApplication(npyscreen.NPSAppManaged):
             "MAIN", addModelsForm, name="Install Stable Diffusion Models"
         )
-
 
 # --------------------------------------------------------
 def process_and_execute(opt: Namespace, selections: Namespace):
     models_to_remove = [
@@ -409,6 +414,7 @@ def select_and_download_models(opt: Namespace):
             precision=precision,
         )
     else:
+        set_min_terminal_size(MIN_COLS, MIN_LINES)
         installApp = AddModelApplication()
         installApp.run()
 
@@ -475,7 +481,7 @@ def main():
         sys.exit(-1)
     except KeyboardInterrupt:
         print("\nGoodbye! Come back soon.")
-    except (widget.NotEnoughSpaceForWidget, Exception) as e:
+    except widget.NotEnoughSpaceForWidget as e:
         if str(e).startswith("Height of 1 allocated"):
             print(
                 "** Insufficient vertical space for the interface. Please make your window taller and try again"
@@ -484,11 +490,6 @@ def main():
             print(
                 "** Insufficient horizontal space for the interface. Please make your window wider and try again."
             )
-        else:
-            print(f"** An error has occurred: {str(e)}")
-            traceback.print_exc()
-        sys.exit(-1)
-
 
 # -------------------------------------
 if __name__ == "__main__":
@@ -67,6 +67,9 @@ def install_requested_models(
     purge_deleted: bool = False,
     config_file_path: Path = None,
 ):
+    '''
+    Entry point for installing/deleting starter models, or installing external models.
+    '''
     config_file_path=config_file_path or default_config_file()
     if not config_file_path.exists():
         open(config_file_path,'w')
@@ -117,12 +120,13 @@ def install_requested_models(
         argument = '--autoconvert' if convert_to_diffusers else '--autoimport'
         initfile = Path(Globals.root, Globals.initfile)
         replacement = Path(Globals.root, f'{Globals.initfile}.new')
+        directory = str(scan_directory).replace('\\','/')
         with open(initfile,'r') as input:
             with open(replacement,'w') as output:
                 while line := input.readline():
                     if not line.startswith(argument):
                         output.writelines([line])
-                output.writelines([f'{argument} {str(scan_directory)}'])
+                output.writelines([f'{argument} {directory}'])
         os.replace(replacement,initfile)
 
 # -------------------------------------
@@ -231,7 +235,6 @@ def _download_ckpt_weights(mconfig: DictConfig, access_token: str) -> Path:
 def download_from_hf(
     model_class: object, model_name: str, cache_subdir: Path = Path("hub"), **kwargs
 ):
-    print("", file=sys.stderr)  # to prevent tqdm from overwriting
     path = global_cache_dir(cache_subdir)
     model = model_class.from_pretrained(
         model_name,
@@ -2,8 +2,34 @@
 Widget class definitions used by model_select.py, merge_diffusers.py and textual_inversion.py
 '''
 import math
+import platform
 import npyscreen
+import os
+import sys
 import curses
+import struct
+
+from shutil import get_terminal_size
+
+# -------------------------------------
+def set_terminal_size(columns: int, lines: int):
+    OS = platform.uname().system
+    if OS=="Windows":
+        os.system(f'mode con: cols={columns} lines={lines}')
+    elif OS in ['Darwin', 'Linux']:
+        import termios
+        import fcntl
+        winsize = struct.pack("HHHH", lines, columns, 0, 0)
+        fcntl.ioctl(sys.stdout.fileno(), termios.TIOCSWINSZ, winsize)
+        sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=lines, cols=columns))
+        sys.stdout.flush()
+
+def set_min_terminal_size(min_cols: int, min_lines: int):
+    # make sure there's enough room for the ui
+    term_cols, term_lines = get_terminal_size()
+    cols = max(term_cols, min_cols)
+    lines = max(term_lines, min_lines)
+    set_terminal_size(cols,lines)
+
 class IntSlider(npyscreen.Slider):
     def translate_value(self):
```
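The new widget helpers above implement item 9 with two platform-specific mechanisms: on Windows, `mode con:` asks the console to resize itself; on macOS and Linux, the `TIOCSWINSZ` ioctl updates the kernel's record of the window size while the xterm escape sequence `ESC [ 8 ; rows ; cols t` asks the terminal emulator to actually resize. Because `set_min_terminal_size` takes the max of the current and minimum dimensions, it only ever grows the window, never shrinks it. A hedged usage sketch (the module path is inferred from the diff's relative imports and may not match the tree exactly):

```python
# Hedged usage sketch for the new helpers defined in the diff above.
# The import path is an assumption based on the relative imports shown.
from ldm.invoke.config.widgets import set_min_terminal_size

MIN_COLS, MIN_LINES = 120, 45      # the minimums the model installer declares
set_min_terminal_size(MIN_COLS, MIN_LINES)  # grow (never shrink) the window
# ...then launch the npyscreen form, which can now assume 120x45 of space
```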
```diff
@@ -23,7 +49,6 @@ class CenteredTitleText(npyscreen.TitleText):
         maxy, maxx = self.parent.curses_pad.getmaxyx()
         label = self.name
         self.relx = (maxx - len(label)) // 2
-        begin_entry_at = -self.relx + 2
 
 # -------------------------------------
 class CenteredButtonPress(npyscreen.ButtonPress):
@@ -428,8 +428,6 @@ class ModelManager(object):
         torch.cuda.reset_peak_memory_stats()
         torch.cuda.empty_cache()
 
-        tic = time.time()
-
         # this does the work
         if not os.path.isabs(config):
             config = os.path.join(Globals.root, config)
@@ -642,21 +640,18 @@ class ModelManager(object):
         models.yaml file.
         """
         model_name = model_name or Path(repo_or_path).stem
-        description = model_description or f"imported diffusers model {model_name}"
+        model_description = model_description or f"Imported diffusers model {model_name}"
         new_config = dict(
             description=model_description,
             vae=vae,
             format="diffusers",
         )
-        print(f"DEBUG: here i am 1")
         if isinstance(repo_or_path, Path) and repo_or_path.exists():
             new_config.update(path=str(repo_or_path))
         else:
             new_config.update(repo_id=repo_or_path)
-        print(f"DEBUG: here i am 2")
 
         self.add_model(model_name, new_config, True)
-        print(f"DEBUG: config = {self.config}")
         if commit_to_conf:
             self.commit(commit_to_conf)
         return model_name
@@ -704,7 +699,7 @@ class ModelManager(object):
             model_name or Path(weights).stem
         )  # note this gives ugly pathnames if used on a URL without a Content-Disposition header
         model_description = (
-            model_description or f"imported stable diffusion weights file {model_name}"
+            model_description or f"Imported stable diffusion weights file {model_name}"
         )
         new_config = dict(
             weights=str(weights_path),
@@ -840,7 +835,7 @@ class ModelManager(object):
                 thing, commit_to_conf=commit_to_conf
             )
             pipeline, _, _, _ = self._load_diffusers_model(self.config[model_name])
+            return model_name
         else:
             print(
                 f"** {thing}: Unknown thing. Please provide a URL, file path, directory or HuggingFace repo_id"
@@ -38,7 +38,7 @@ dependencies = [
     "albumentations",
     "click",
     "clip_anytorch",  # replacing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
-    "compel>=0.1.6",
+    "compel==0.1.7",
     "datasets",
     "diffusers[torch]~=0.13",
     "dnspython==2.2.1",
```