version 2.3.3-rc2

- installer now installs the pretty dialog-based console launcher
- added dialogrc for custom colors
- added an updater that downloads the new launcher when users run an update
This commit is contained in:
Lincoln Stein 2023-03-27 21:10:24 -04:00
parent 77a63e5310
commit 90f00db032
17 changed files with 198 additions and 9037 deletions

View File

@@ -1,164 +0,0 @@
@echo off
@rem This script will install git (if not found on the PATH variable)
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
@rem For users who already have git, this step will be skipped.
@rem Next, it'll download the project's source code.
@rem Then it will download a self-contained, standalone Python and unpack it.
@rem Finally, it'll create the Python virtual environment and preload the models.
@rem This enables a user to install this project without manually installing git or Python
@rem change to the script's directory
PUSHD "%~dp0"
set "no_cache_dir=--no-cache-dir"
if "%1" == "use-cache" (
set "no_cache_dir="
)
echo ***** Installing InvokeAI.. *****
@rem Config
set INSTALL_ENV_DIR=%cd%\installer_files\env
@rem https://mamba.readthedocs.io/en/latest/installation.html
set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
set PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
set PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-x86_64-pc-windows-msvc-shared-install_only.tar.gz
set PACKAGES_TO_INSTALL=
call git --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git
@rem Cleanup
del /q .tmp1 .tmp2
@rem (if necessary) install git into a contained environment
if "%PACKAGES_TO_INSTALL%" NEQ "" (
@rem download micromamba
echo ***** Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to micromamba.exe *****
call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > micromamba.exe
@rem test the mamba binary
echo ***** Micromamba version: *****
call micromamba.exe --version
@rem create the installer env
if not exist "%INSTALL_ENV_DIR%" (
call micromamba.exe create -y --prefix "%INSTALL_ENV_DIR%"
)
echo ***** Packages to install:%PACKAGES_TO_INSTALL% *****
call micromamba.exe install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge %PACKAGES_TO_INSTALL%
if not exist "%INSTALL_ENV_DIR%" (
echo ----- There was a problem while installing "%PACKAGES_TO_INSTALL%" using micromamba. Cannot continue. -----
pause
exit /b
)
)
del /q micromamba.exe
@rem For 'git' only
set PATH=%INSTALL_ENV_DIR%\Library\bin;%PATH%
@rem Download/unpack/clean up InvokeAI release sourceball
set err_msg=----- InvokeAI source download failed -----
echo Trying to download "%RELEASE_URL%%RELEASE_SOURCEBALL%"
curl -L %RELEASE_URL%%RELEASE_SOURCEBALL% --output InvokeAI.tgz
if %errorlevel% neq 0 goto err_exit
set err_msg=----- InvokeAI source unpack failed -----
tar -zxf InvokeAI.tgz
if %errorlevel% neq 0 goto err_exit
del /q InvokeAI.tgz
set err_msg=----- InvokeAI source copy failed -----
cd InvokeAI-*
xcopy . .. /e /h
if %errorlevel% neq 0 goto err_exit
cd ..
@rem cleanup
for /f %%i in ('dir /b InvokeAI-*') do rd /s /q %%i
rd /s /q .dev_scripts .github docker-build tests
del /q requirements.in requirements-mkdocs.txt shell.nix
echo ***** Unpacked InvokeAI source *****
@rem Download/unpack/clean up python-build-standalone
set err_msg=----- Python download failed -----
curl -L %PYTHON_BUILD_STANDALONE_URL%/%PYTHON_BUILD_STANDALONE% --output python.tgz
if %errorlevel% neq 0 goto err_exit
set err_msg=----- Python unpack failed -----
tar -zxf python.tgz
if %errorlevel% neq 0 goto err_exit
del /q python.tgz
echo ***** Unpacked python-build-standalone *****
@rem create venv
set err_msg=----- problem creating venv -----
.\python\python -E -s -m venv .venv
if %errorlevel% neq 0 goto err_exit
call .venv\Scripts\activate.bat
echo ***** Created Python virtual environment *****
@rem Print venv's Python version
set err_msg=----- problem calling venv's python -----
echo We're running under
.venv\Scripts\python --version
if %errorlevel% neq 0 goto err_exit
set err_msg=----- pip update failed -----
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location --upgrade pip wheel
if %errorlevel% neq 0 goto err_exit
echo ***** Updated pip and wheel *****
set err_msg=----- requirements file copy failed -----
copy binary_installer\py3.10-windows-x86_64-cuda-reqs.txt requirements.txt
if %errorlevel% neq 0 goto err_exit
set err_msg=----- main pip install failed -----
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -r requirements.txt
if %errorlevel% neq 0 goto err_exit
echo ***** Installed Python dependencies *****
set err_msg=----- InvokeAI setup failed -----
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -e .
if %errorlevel% neq 0 goto err_exit
copy binary_installer\invoke.bat.in .\invoke.bat
echo ***** Installed invoke launcher script ******
@rem more cleanup
rd /s /q binary_installer installer_files
@rem preload the models
call .venv\Scripts\python ldm\invoke\config\invokeai_configure.py
set err_msg=----- model download clone failed -----
if %errorlevel% neq 0 goto err_exit
deactivate
echo ***** Finished downloading models *****
echo All done! Execute the file invoke.bat in this directory to start InvokeAI
pause
exit
:err_exit
echo %err_msg%
pause
exit

View File

@@ -1,235 +0,0 @@
#!/usr/bin/env bash
# ensure we're in the correct folder in case user's CWD is somewhere else
scriptdir=$(dirname "$0")
cd "$scriptdir"
set -euo pipefail
IFS=$'\n\t'
function _err_exit {
    if test "$1" -ne 0
    then
        echo -e "Error code $1; Error caught was '$2'"
        read -p "Press any key to exit..."
        exit
    fi
}
# This script will install git (if not found on the PATH variable)
# using micromamba (an 8mb static-linked single-file binary, conda replacement).
# For users who already have git, this step will be skipped.
# Next, it'll download the project's source code.
# Then it will download a self-contained, standalone Python and unpack it.
# Finally, it'll create the Python virtual environment and preload the models.
# This enables a user to install this project without manually installing git or Python
echo -e "\n***** Installing InvokeAI into $(pwd)... *****\n"
export no_cache_dir="--no-cache-dir"
if [ $# -ge 1 ]; then
    if [ "$1" = "use-cache" ]; then
        export no_cache_dir=""
    fi
fi
OS_NAME=$(uname -s)
case "${OS_NAME}" in
Linux*) OS_NAME="linux";;
Darwin*) OS_NAME="darwin";;
*) echo -e "\n----- Unknown OS: $OS_NAME! This script runs only on Linux or macOS -----\n" && exit
esac
OS_ARCH=$(uname -m)
case "${OS_ARCH}" in
x86_64*) ;;
arm64*) ;;
*) echo -e "\n----- Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64 -----\n" && exit
esac
# https://mamba.readthedocs.io/en/latest/installation.html
MAMBA_OS_NAME=$OS_NAME
MAMBA_ARCH=$OS_ARCH
if [ "$OS_NAME" == "darwin" ]; then
MAMBA_OS_NAME="osx"
fi
if [ "$OS_ARCH" == "linux" ]; then
MAMBA_ARCH="aarch64"
fi
if [ "$OS_ARCH" == "x86_64" ]; then
MAMBA_ARCH="64"
fi
PY_ARCH=$OS_ARCH
if [ "$OS_ARCH" == "arm64" ]; then
PY_ARCH="aarch64"
fi
# Compute device ('cd' segment of reqs files) detection goes here
# This needs a ton of work
# Suggestions:
# - lspci
# - check $PATH for nvidia-smi, get CUDA/GPU version from output
# - Surely there's a similar utility for AMD?
CD="cuda"
if [ "$OS_NAME" == "darwin" ] && [ "$OS_ARCH" == "arm64" ]; then
CD="mps"
fi
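# (Hypothetical sketch, not part of the original script: one way to act on the
#  nvidia-smi suggestion above; left commented out because it is untested here.)
# if command -v nvidia-smi >/dev/null 2>&1; then
#     echo "Detected NVIDIA GPU(s):"
#     nvidia-smi --query-gpu=name,driver_version --format=csv,noheader
# fi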
# config
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${MAMBA_OS_NAME}-${MAMBA_ARCH}/latest"
RELEASE_URL=https://github.com/invoke-ai/InvokeAI
RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
if [ "$OS_NAME" == "darwin" ]; then
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-apple-darwin-install_only.tar.gz
elif [ "$OS_NAME" == "linux" ]; then
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-unknown-linux-gnu-install_only.tar.gz
fi
echo "INSTALLING $RELEASE_SOURCEBALL FROM $RELEASE_URL"
PACKAGES_TO_INSTALL=""
if ! hash "git" &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi
# (if necessary) install git and conda into a contained environment
if [ "$PACKAGES_TO_INSTALL" != "" ]; then
# download micromamba
echo -e "\n***** Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to micromamba *****\n"
curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvjO bin/micromamba > micromamba
chmod u+x ./micromamba
# test the mamba binary
echo -e "\n***** Micromamba version: *****\n"
./micromamba --version
# create the installer env
if [ ! -e "$INSTALL_ENV_DIR" ]; then
./micromamba create -y --prefix "$INSTALL_ENV_DIR"
fi
echo -e "\n***** Packages to install:$PACKAGES_TO_INSTALL *****\n"
./micromamba install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge "$PACKAGES_TO_INSTALL"
if [ ! -e "$INSTALL_ENV_DIR" ]; then
echo -e "\n----- There was a problem while initializing micromamba. Cannot continue. -----\n"
exit
fi
fi
rm -f micromamba.exe
export PATH="$INSTALL_ENV_DIR/bin:$PATH"
# Download/unpack/clean up InvokeAI release sourceball
_err_msg="\n----- InvokeAI source download failed -----\n"
curl -L $RELEASE_URL/$RELEASE_SOURCEBALL --output InvokeAI.tgz
_err_exit $? _err_msg
_err_msg="\n----- InvokeAI source unpack failed -----\n"
tar -zxf InvokeAI.tgz
_err_exit $? _err_msg
rm -f InvokeAI.tgz
_err_msg="\n----- InvokeAI source copy failed -----\n"
cd InvokeAI-*
cp -r . ..
_err_exit $? _err_msg
cd ..
# cleanup
rm -rf InvokeAI-*/
rm -rf .dev_scripts/ .github/ docker-build/ tests/ requirements.in requirements-mkdocs.txt shell.nix
echo -e "\n***** Unpacked InvokeAI source *****\n"
# Download/unpack/clean up python-build-standalone
_err_msg="\n----- Python download failed -----\n"
curl -L $PYTHON_BUILD_STANDALONE_URL/$PYTHON_BUILD_STANDALONE --output python.tgz
_err_exit $? _err_msg
_err_msg="\n----- Python unpack failed -----\n"
tar -zxf python.tgz
_err_exit $? _err_msg
rm -f python.tgz
echo -e "\n***** Unpacked python-build-standalone *****\n"
# create venv
_err_msg="\n----- problem creating venv -----\n"
if [ "$OS_NAME" == "darwin" ]; then
# patch sysconfig so that extensions can build properly
# adapted from https://github.com/cashapp/hermit-packages/commit/fcba384663892f4d9cfb35e8639ff7a28166ee43
PYTHON_INSTALL_DIR="$(pwd)/python"
SYSCONFIG="$(echo python/lib/python*/_sysconfigdata_*.py)"
TMPFILE="$(mktemp)"
chmod +w "${SYSCONFIG}"
cp "${SYSCONFIG}" "${TMPFILE}"
sed "s,'/install,'${PYTHON_INSTALL_DIR},g" "${TMPFILE}" > "${SYSCONFIG}"
rm -f "${TMPFILE}"
fi
./python/bin/python3 -E -s -m venv .venv
_err_exit $? _err_msg
source .venv/bin/activate
echo -e "\n***** Created Python virtual environment *****\n"
# Print venv's Python version
_err_msg="\n----- problem calling venv's python -----\n"
echo -e "We're running under"
.venv/bin/python3 --version
_err_exit $? _err_msg
_err_msg="\n----- pip update failed -----\n"
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location --upgrade pip
_err_exit $? _err_msg
echo -e "\n***** Updated pip *****\n"
_err_msg="\n----- requirements file copy failed -----\n"
cp binary_installer/py3.10-${OS_NAME}-"${OS_ARCH}"-${CD}-reqs.txt requirements.txt
_err_exit $? _err_msg
_err_msg="\n----- main pip install failed -----\n"
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -r requirements.txt
_err_exit $? _err_msg
echo -e "\n***** Installed Python dependencies *****\n"
_err_msg="\n----- InvokeAI setup failed -----\n"
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -e .
_err_exit $? _err_msg
echo -e "\n***** Installed InvokeAI *****\n"
cp binary_installer/invoke.sh.in ./invoke.sh
chmod a+rx ./invoke.sh
echo -e "\n***** Installed invoke launcher script ******\n"
# more cleanup
rm -rf binary_installer/ installer_files/
# preload the models
.venv/bin/python3 scripts/configure_invokeai.py
_err_msg="\n----- model download clone failed -----\n"
_err_exit $? _err_msg
deactivate
echo -e "\n***** Finished downloading models *****\n"
echo "All done! Run the command"
echo " $scriptdir/invoke.sh"
echo "to start InvokeAI."
read -p "Press any key to exit..."
exit

View File

@@ -1,36 +0,0 @@
@echo off
PUSHD "%~dp0"
call .venv\Scripts\activate.bat
echo Do you want to generate images using the
echo 1. command-line
echo 2. browser-based UI
echo OR
echo 3. open the developer console
set /p choice="Please enter 1, 2 or 3: "
if /i "%choice%" == "1" (
echo Starting the InvokeAI command-line.
.venv\Scripts\python scripts\invoke.py %*
) else if /i "%choice%" == "2" (
echo Starting the InvokeAI browser-based UI.
.venv\Scripts\python scripts\invoke.py --web %*
) else if /i "%choice%" == "3" (
echo Developer Console
echo Python command is:
where python
echo Python version is:
python --version
echo *************************
echo You are now in the system shell, with the local InvokeAI Python virtual environment activated,
echo so that you can troubleshoot this InvokeAI installation as necessary.
echo *************************
echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
call cmd /k
) else (
echo Invalid selection
pause
exit /b
)
deactivate

View File

@@ -1,46 +0,0 @@
#!/usr/bin/env sh
set -eu
. .venv/bin/activate
# set required env var for torch on mac MPS
if [ "$(uname -s)" == "Darwin" ]; then
export PYTORCH_ENABLE_MPS_FALLBACK=1
fi
echo "Do you want to generate images using the"
echo "1. command-line"
echo "2. browser-based UI"
echo "OR"
echo "3. open the developer console"
echo "Please enter 1, 2, or 3:"
read choice
case $choice in
    1)
        printf "\nStarting the InvokeAI command-line..\n";
        .venv/bin/python scripts/invoke.py $*;
        ;;
    2)
        printf "\nStarting the InvokeAI browser-based UI..\n";
        .venv/bin/python scripts/invoke.py --web $*;
        ;;
    3)
        printf "\nDeveloper Console:\n";
        printf "Python command is:\n\t";
        which python;
        printf "Python version is:\n\t";
        python --version;
        echo "*************************"
        echo "You are now in your user shell ($SHELL) with the local InvokeAI Python virtual environment activated,";
        echo "so that you can troubleshoot this InvokeAI installation as necessary.";
        printf "*************************\n"
        echo "*** Type \`exit\` to quit this shell and deactivate the Python virtual environment *** ";
        /usr/bin/env "$SHELL";
        ;;
    *)
        echo "Invalid selection";
        exit
        ;;
esac

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,17 +0,0 @@
InvokeAI
Project homepage: https://github.com/invoke-ai/InvokeAI
Installation on Windows:
NOTE: You might need to enable Windows Long Paths. If you're not sure,
then you almost certainly need to. Simply double-click the 'WinLongPathsEnabled.reg'
file. Note that you will need to have admin privileges in order to
do this.
Please double-click the 'install.bat' file (while keeping it inside the invokeAI folder).
Installation on Linux and Mac:
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).
After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh'
file (on Linux/Mac) to start InvokeAI.

View File

@@ -1,33 +0,0 @@
--prefer-binary
--extra-index-url https://download.pytorch.org/whl/torch_stable.html
--extra-index-url https://download.pytorch.org/whl/cu116
--trusted-host https://download.pytorch.org
accelerate~=0.15
albumentations
diffusers[torch]~=0.11
einops
eventlet
flask_cors
flask_socketio
flaskwebgui==1.0.3
getpass_asterisk
imageio-ffmpeg
pyreadline3
realesrgan
send2trash
streamlit
taming-transformers-rom1504
test-tube
torch-fidelity
torch==1.12.1 ; platform_system == 'Darwin'
torch==1.12.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
torchvision==0.13.1 ; platform_system == 'Darwin'
torchvision==0.13.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
transformers
picklescan
https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip
https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip
https://github.com/invoke-ai/GFPGAN/archive/3f5d2397361199bc4a91c08bb7d80f04d7805615.zip ; platform_system=='Windows'
https://github.com/invoke-ai/GFPGAN/archive/c796277a1cf77954e5fc0b288d7062d162894248.zip ; platform_system=='Linux' or platform_system=='Darwin'
https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip
https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip

View File

@@ -241,14 +241,18 @@ class InvokeAiInstance:
         from plumbum import FG, local
 
+        # Note that we're installing pinned versions of torch and
+        # torchvision here, which may not correspond to what is
+        # in pyproject.toml. This is a hack to prevent torch 2.0 from
+        # being installed and immediately uninstalled and replaced with 1.13
         pip = local[self.pip]
 
         (
             pip[
                 "install",
                 "--require-virtualenv",
-                "torch",
-                "torchvision",
+                "torch~=1.13.1",
+                "torchvision>=0.14.1",
                 "--force-reinstall",
                 "--find-links" if find_links is not None else None,
                 find_links,
@@ -379,6 +383,9 @@ class InvokeAiInstance:
         shutil.copy(src, dest)
         os.chmod(dest, 0o0755)
 
+        if OS == "Linux":
+            shutil.copy(Path(__file__).parent / '..' / "templates" / "dialogrc", self.runtime / '.dialogrc')
+
     def update(self):
         pass

View File

@@ -0,0 +1,27 @@
# Screen
use_shadow = OFF
use_colors = ON
screen_color = (BLACK, BLACK, ON)
# Box
dialog_color = (YELLOW, BLACK , ON)
title_color = (YELLOW, BLACK, ON)
border_color = (YELLOW, BLACK, OFF)
border2_color = (YELLOW, BLACK, OFF)
# Button
button_active_color = (RED, BLACK, OFF)
button_inactive_color = (YELLOW, BLACK, OFF)
button_label_active_color = (YELLOW,BLACK,ON)
button_label_inactive_color = (YELLOW,BLACK,ON)
# Menu box
menubox_color = (BLACK, BLACK, ON)
menubox_border_color = (YELLOW, BLACK, OFF)
menubox_border2_color = (YELLOW, BLACK, OFF)
# Menu window
item_color = (YELLOW, BLACK, OFF)
item_selected_color = (BLACK, YELLOW, OFF)
tag_key_color = (YELLOW, BLACK, OFF)
tag_key_selected_color = (BLACK, YELLOW, OFF)
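
The updated launcher script further down exports DIALOGRC so that dialog picks up these colors. As a rough preview sketch (not part of this commit), assuming the dialog utility is installed and this file has been saved as .dialogrc next to the launcher:

# Hypothetical preview of the color scheme above; not part of this commit.
export DIALOGRC="./.dialogrc"
dialog --colors --backtitle "InvokeAI" --title "Theme preview" \
       --menu "Select an option:" 0 0 0 \
       1 "Generate images with a browser-based interface" \
       2 "Exit" 2>&1 >/dev/tty
clear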

View File

@@ -1,6 +1,8 @@
 #!/bin/bash
-# coauthored by Lincoln Stein, Eugene Brodsky and JoshuaKimsey
+# MIT License
+# Coauthored by Lincoln Stein, Eugene Brodsky and Joshua Kimsey
 # Copyright 2023, The InvokeAI Development Team
 ####
@@ -14,7 +16,7 @@
 set -eu
-# ensure we're in the correct folder in case user's CWD is somewhere else
+# Ensure we're in the correct folder in case user's CWD is somewhere else
 scriptdir=$(dirname "$0")
 cd "$scriptdir"
@@ -23,133 +25,159 @@ cd "$scriptdir"
 export INVOKEAI_ROOT="$scriptdir"
 PARAMS=$@
-# set required env var for torch on mac MPS
+# Check to see if dialog is installed (it seems to be fairly standard, but good to check regardless) and if the user has passed the --no-tui argument to disable the dialog TUI
+tui=true
+if command -v dialog &>/dev/null; then
+    # This must use $@ to properly loop through the arguments passed by the user
+    for arg in "$@"; do
+        if [ "$arg" == "--no-tui" ]; then
+            tui=false
+            # Remove the --no-tui argument to avoid errors later on when passing arguments to InvokeAI
+            PARAMS=$(echo "$PARAMS" | sed 's/--no-tui//')
+            break
+        fi
+    done
+else
+    tui=false
+fi
+# Set required env var for torch on mac MPS
 if [ "$(uname -s)" == "Darwin" ]; then
     export PYTORCH_ENABLE_MPS_FALLBACK=1
 fi
+# Primary function for the case statement to determine user input
 do_choice() {
     case $1 in
         1)
-            echo "Generate images with a browser-based interface"
-            clear
+            clear
+            printf "Generate images with a browser-based interface\n"
             invokeai --web $PARAMS
             ;;
         2)
-            echo "Generate images using a command-line interface"
-            clear
+            clear
+            printf "Generate images using a command-line interface\n"
             invokeai $PARAMS
             ;;
         3)
-            echo "Textual inversion training"
-            clear
+            clear
+            printf "Textual inversion training\n"
            invokeai-ti --gui $PARAMS
             ;;
         4)
-            echo "Merge models (diffusers type only)"
-            clear
+            clear
+            printf "Merge models (diffusers type only)\n"
             invokeai-merge --gui $PARAMS
             ;;
         5)
-            echo "Download and install models"
-            clear
+            clear
+            printf "Download and install models\n"
             invokeai-model-install --root ${INVOKEAI_ROOT}
             ;;
         6)
-            echo "Change InvokeAI startup options"
-            clear
+            clear
+            printf "Change InvokeAI startup options\n"
             invokeai-configure --root ${INVOKEAI_ROOT} --skip-sd-weights --skip-support-models
             ;;
         7)
-            echo "Re-run the configure script to fix a broken install"
-            clear
+            clear
+            printf "Re-run the configure script to fix a broken install\n"
             invokeai-configure --root ${INVOKEAI_ROOT} --yes --default_only
             ;;
         8)
-            echo "Open the developer console"
-            clear
+            clear
+            printf "Open the developer console\n"
             file_name=$(basename "${BASH_SOURCE[0]}")
             bash --init-file "$file_name"
             ;;
         9)
-            echo "Update InvokeAI"
-            clear
+            clear
+            printf "Update InvokeAI\n"
             invokeai-update
             ;;
         10)
-            echo "Command-line help"
-            clear
+            clear
+            printf "Command-line help\n"
             invokeai --help
             ;;
-        *)
-            echo "Exiting..."
-            exit
-            ;;
+        "HELP 1")
+            clear
+            printf "Command-line help\n"
+            invokeai --help
+            ;;
+        *)
+            clear
+            printf "Exiting...\n"
+            exit
+            ;;
     esac
     clear
 }
+# Dialog-based TUI for launcing Invoke functions
 do_dialog() {
-    while true
-    do
-        options=(
-            1 "Generate images with a browser-based interface"
-            2 "Generate images using a command-line interface"
-            3 "Textual inversion training"
-            4 "Merge models (diffusers type only)"
-            5 "Download and install models"
-            6 "Change InvokeAI startup options"
-            7 "Re-run the configure script to fix a broken install"
-            8 "Open the developer console"
-            9 "Update InvokeAI"
-            10 "Command-line help")
+    options=(
+        1 "Generate images with a browser-based interface"
+        2 "Generate images using a command-line interface"
+        3 "Textual inversion training"
+        4 "Merge models (diffusers type only)"
+        5 "Download and install models"
+        6 "Change InvokeAI startup options"
+        7 "Re-run the configure script to fix a broken install"
+        8 "Open the developer console"
+        9 "Update InvokeAI")
-        choice=$(dialog --clear \
-                    --backtitle "InvokeAI" \
-                    --title "What would you like to run?" \
-                    --menu "Select an option:" \
-                    0 0 0 \
-                    "${options[@]}" \
-                    2>&1 >/dev/tty) || clear
-        do_choice "$choice"
-    done
+    choice=$(dialog --clear \
+                --backtitle "\Zb\Zu\Z3InvokeAI" \
+                --colors \
+                --title "What would you like to run?" \
+                --ok-label "Run" \
+                --cancel-label "Exit" \
+                --help-button \
+                --help-label "CLI Help" \
+                --menu "Select an option:" \
+                0 0 0 \
+                "${options[@]}" \
+                2>&1 >/dev/tty) || clear
+    do_choice "$choice"
     clear
 }
+# Command-line interface for launching Invoke functions
 do_line_input() {
-    echo " ** For a more attractive experience, please install the 'dialog' utility. **"
-    echo ""
-    while true
-    do
-        echo "Do you want to generate images using the"
-        echo "1. browser-based UI"
-        echo "2. command-line interface"
-        echo "3. run textual inversion training"
-        echo "4. merge models (diffusers type only)"
-        echo "5. download and install models"
-        echo "6. change InvokeAI startup options"
-        echo "7. re-run the configure script to fix a broken install"
-        echo "8. open the developer console"
-        echo "9. update InvokeAI"
-        echo "10. command-line help"
-        echo "Q - Quit"
-        echo ""
-        read -p "Please enter 1-10, Q: [1] " yn
-        choice=${yn:='1'}
-        do_choice $choice
-    done
+    clear
+    printf " ** For a more attractive experience, please install the 'dialog' utility using your package manager. **\n\n"
+    printf "Do you want to generate images using the\n"
+    printf "1: Browser-based UI\n"
+    printf "2: Command-line interface\n"
+    printf "3: Run textual inversion training\n"
+    printf "4: Merge models (diffusers type only)\n"
+    printf "5: Download and install models\n"
+    printf "6: Change InvokeAI startup options\n"
+    printf "7: Re-run the configure script to fix a broken install\n"
+    printf "8: Open the developer console\n"
+    printf "9: Update InvokeAI\n"
+    printf "10: Command-line help\n"
+    printf "Q: Quit\n\n"
+    read -p "Please enter 1-10, Q: [1] " yn
+    choice=${yn:='1'}
+    do_choice $choice
+    clear
 }
+# Main IF statement for launching Invoke with either the TUI or CLI, and for checking if the user is in the developer console
 if [ "$0" != "bash" ]; then
-    # Dialog seems to be a standard installtion for most Linux distros, but this checks to ensure it is present regardless
-    if command -v dialog &> /dev/null ; then
-        do_dialog
-    else
-        do_line_input
-    fi
+    while true; do
+        if $tui; then
+            # .dialogrc must be located in the same directory as the invoke.sh script
+            export DIALOGRC="./.dialogrc"
+            do_dialog
+        else
+            do_line_input
+        fi
+    done
 else # in developer console
     python --version
-    echo "Press ^D to exit"
+    printf "Press ^D to exit\n"
     export PS1="(InvokeAI) \u@\h \w> "
 fi

View File

@@ -1288,8 +1288,14 @@ def check_internet() -> bool:
 # This routine performs any patch-ups needed after installation
 def run_patches():
-    # install ckpt configuration files that may have been added to the
-    # distro after original root directory configuration
+    install_missing_config_files()
+    update_launchers()
+
+def install_missing_config_files():
+    """
+    install ckpt configuration files that may have been added to the
+    distro after original root directory configuration
+    """
     import invokeai.configs as conf
     from shutil import copyfile
@@ -1300,6 +1306,20 @@ def run_patches():
         if not dest.exists():
             copyfile(src,dest)
 
+def update_launchers():
+    """
+    Make any updates to the launcher .sh and .bat scripts that may be needed
+    from release to release. This is not an elegant solution. Instead, the
+    launcher should be moved into the source tree and installed by pip.
+    """
+    if sys.platform == "linux" \
+       and not Path(Globals.root,'.dialogrc').exists():
+        print('>> Downloading new version of launcher script and its config file')
+        from ldm.util import download_with_progress_bar
+        url_base = 'https://raw.githubusercontent.com/invoke-ai/InvokeAI/v2.3.3-rc1/installer/templates/'
+        download_with_progress_bar(url_base+'invoke.sh.in',Path(Globals.root,'invoke.sh'))
+        download_with_progress_bar(url_base+'dialogrc',Path(Globals.root,'.dialogrc'))
+
 if __name__ == '__main__':
     main()

View File

@@ -1,2 +1,2 @@
-__version__='2.3.3-rc1'
+__version__='2.3.3-rc2'

View File

@@ -346,10 +346,6 @@ def download_with_resume(url: str, dest: Path, access_token: str = None) -> Path
     print(f"* {dest}: Downloading...")
     try:
-        if content_length < 2000:
-            print(f"*** ERROR DOWNLOADING {url}: {resp.text}")
-            return None
-
         with open(dest, open_mode) as file, tqdm(
             desc=str(dest),
             initial=exist_size,