merge with main

Lincoln Stein 2023-06-05 22:03:44 -04:00
commit 90333c0074
45 changed files with 377 additions and 9254 deletions

View File

@@ -1,164 +0,0 @@
@echo off
@rem This script will install git (if not found on the PATH variable)
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
@rem For users who already have git, this step will be skipped.
@rem Next, it'll download the project's source code.
@rem Then it will download a self-contained, standalone Python and unpack it.
@rem Finally, it'll create the Python virtual environment and preload the models.
@rem This enables a user to install this project without manually installing git or Python
@rem change to the script's directory
PUSHD "%~dp0"
set "no_cache_dir=--no-cache-dir"
if "%1" == "use-cache" (
set "no_cache_dir="
)
echo ***** Installing InvokeAI.. *****
@rem Config
set INSTALL_ENV_DIR=%cd%\installer_files\env
@rem https://mamba.readthedocs.io/en/latest/installation.html
set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
set PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
set PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-x86_64-pc-windows-msvc-shared-install_only.tar.gz
set PACKAGES_TO_INSTALL=
call git --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git
@rem Cleanup
del /q .tmp1 .tmp2
@rem (if necessary) install git into a contained environment
if "%PACKAGES_TO_INSTALL%" NEQ "" (
@rem download micromamba
echo ***** Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to micromamba.exe *****
call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > micromamba.exe
@rem test the mamba binary
echo ***** Micromamba version: *****
call micromamba.exe --version
@rem create the installer env
if not exist "%INSTALL_ENV_DIR%" (
call micromamba.exe create -y --prefix "%INSTALL_ENV_DIR%"
)
echo ***** Packages to install:%PACKAGES_TO_INSTALL% *****
call micromamba.exe install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge %PACKAGES_TO_INSTALL%
if not exist "%INSTALL_ENV_DIR%" (
echo ----- There was a problem while installing "%PACKAGES_TO_INSTALL%" using micromamba. Cannot continue. -----
pause
exit /b
)
)
del /q micromamba.exe
@rem For 'git' only
set PATH=%INSTALL_ENV_DIR%\Library\bin;%PATH%
@rem Download/unpack/clean up InvokeAI release sourceball
set err_msg=----- InvokeAI source download failed -----
echo Trying to download "%RELEASE_URL%%RELEASE_SOURCEBALL%"
curl -L %RELEASE_URL%%RELEASE_SOURCEBALL% --output InvokeAI.tgz
if %errorlevel% neq 0 goto err_exit
set err_msg=----- InvokeAI source unpack failed -----
tar -zxf InvokeAI.tgz
if %errorlevel% neq 0 goto err_exit
del /q InvokeAI.tgz
set err_msg=----- InvokeAI source copy failed -----
cd InvokeAI-*
xcopy . .. /e /h
if %errorlevel% neq 0 goto err_exit
cd ..
@rem cleanup
for /f %%i in ('dir /b InvokeAI-*') do rd /s /q %%i
rd /s /q .dev_scripts .github docker-build tests
del /q requirements.in requirements-mkdocs.txt shell.nix
echo ***** Unpacked InvokeAI source *****
@rem Download/unpack/clean up python-build-standalone
set err_msg=----- Python download failed -----
curl -L %PYTHON_BUILD_STANDALONE_URL%/%PYTHON_BUILD_STANDALONE% --output python.tgz
if %errorlevel% neq 0 goto err_exit
set err_msg=----- Python unpack failed -----
tar -zxf python.tgz
if %errorlevel% neq 0 goto err_exit
del /q python.tgz
echo ***** Unpacked python-build-standalone *****
@rem create venv
set err_msg=----- problem creating venv -----
.\python\python -E -s -m venv .venv
if %errorlevel% neq 0 goto err_exit
call .venv\Scripts\activate.bat
echo ***** Created Python virtual environment *****
@rem Print venv's Python version
set err_msg=----- problem calling venv's python -----
echo We're running under
.venv\Scripts\python --version
if %errorlevel% neq 0 goto err_exit
set err_msg=----- pip update failed -----
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location --upgrade pip wheel
if %errorlevel% neq 0 goto err_exit
echo ***** Updated pip and wheel *****
set err_msg=----- requirements file copy failed -----
copy binary_installer\py3.10-windows-x86_64-cuda-reqs.txt requirements.txt
if %errorlevel% neq 0 goto err_exit
set err_msg=----- main pip install failed -----
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -r requirements.txt
if %errorlevel% neq 0 goto err_exit
echo ***** Installed Python dependencies *****
set err_msg=----- InvokeAI setup failed -----
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -e .
if %errorlevel% neq 0 goto err_exit
copy binary_installer\invoke.bat.in .\invoke.bat
echo ***** Installed invoke launcher script ******
@rem more cleanup
rd /s /q binary_installer installer_files
@rem preload the models
@rem set the message before the call so a failure is reported correctly
set err_msg=----- model download clone failed -----
call .venv\Scripts\python ldm\invoke\config\invokeai_configure.py
if %errorlevel% neq 0 goto err_exit
deactivate
echo ***** Finished downloading models *****
echo All done! Execute the file invoke.bat in this directory to start InvokeAI
pause
exit
:err_exit
echo %err_msg%
pause
exit

View File

@@ -1,235 +0,0 @@
#!/usr/bin/env bash
# ensure we're in the correct folder in case user's CWD is somewhere else
scriptdir=$(dirname "$0")
cd "$scriptdir"
set -euo pipefail
IFS=$'\n\t'
function _err_exit {
    if test "$1" -ne 0
    then
        # $2 is the *name* of the variable holding the message;
        # expand it indirectly so callers can pass "_err_msg" by name
        echo -e "Error code $1; Error caught was '${!2}'"
        read -p "Press any key to exit..."
        exit
    fi
}
# This script will install git (if not found on the PATH variable)
# using micromamba (an 8mb static-linked single-file binary, conda replacement).
# For users who already have git, this step will be skipped.
# Next, it'll download the project's source code.
# Then it will download a self-contained, standalone Python and unpack it.
# Finally, it'll create the Python virtual environment and preload the models.
# This enables a user to install this project without manually installing git or Python
echo -e "\n***** Installing InvokeAI into $(pwd)... *****\n"
export no_cache_dir="--no-cache-dir"
if [ $# -ge 1 ]; then
if [ "$1" = "use-cache" ]; then
export no_cache_dir=""
fi
fi
OS_NAME=$(uname -s)
case "${OS_NAME}" in
Linux*) OS_NAME="linux";;
Darwin*) OS_NAME="darwin";;
*) echo -e "\n----- Unknown OS: $OS_NAME! This script runs only on Linux or macOS -----\n" && exit
esac
OS_ARCH=$(uname -m)
case "${OS_ARCH}" in
x86_64*) ;;
arm64*) ;;
*) echo -e "\n----- Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64 -----\n" && exit
esac
# https://mamba.readthedocs.io/en/latest/installation.html
MAMBA_OS_NAME=$OS_NAME
MAMBA_ARCH=$OS_ARCH
if [ "$OS_NAME" == "darwin" ]; then
MAMBA_OS_NAME="osx"
fi
if [ "$OS_ARCH" == "linux" ]; then
MAMBA_ARCH="aarch64"
fi
if [ "$OS_ARCH" == "x86_64" ]; then
MAMBA_ARCH="64"
fi
PY_ARCH=$OS_ARCH
if [ "$OS_ARCH" == "arm64" ]; then
PY_ARCH="aarch64"
fi
# Compute device ('cd' segment of reqs files) detect goes here
# This needs a ton of work
# Suggestions:
# - lspci
# - check $PATH for nvidia-smi, gtt CUDA/GPU version from output
# - Surely there's a similar utility for AMD?
CD="cuda"
if [ "$OS_NAME" == "darwin" ] && [ "$OS_ARCH" == "arm64" ]; then
CD="mps"
fi
# config
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${MAMBA_OS_NAME}-${MAMBA_ARCH}/latest"
RELEASE_URL=https://github.com/invoke-ai/InvokeAI
RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
if [ "$OS_NAME" == "darwin" ]; then
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-apple-darwin-install_only.tar.gz
elif [ "$OS_NAME" == "linux" ]; then
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-unknown-linux-gnu-install_only.tar.gz
fi
echo "INSTALLING $RELEASE_SOURCEBALL FROM $RELEASE_URL"
PACKAGES_TO_INSTALL=""
if ! hash "git" &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi
# (if necessary) install git and conda into a contained environment
if [ "$PACKAGES_TO_INSTALL" != "" ]; then
# download micromamba
echo -e "\n***** Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to micromamba *****\n"
curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvjO bin/micromamba > micromamba
chmod u+x ./micromamba
# test the mamba binary
echo -e "\n***** Micromamba version: *****\n"
./micromamba --version
# create the installer env
if [ ! -e "$INSTALL_ENV_DIR" ]; then
./micromamba create -y --prefix "$INSTALL_ENV_DIR"
fi
echo -e "\n***** Packages to install:$PACKAGES_TO_INSTALL *****\n"
./micromamba install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge "$PACKAGES_TO_INSTALL"
if [ ! -e "$INSTALL_ENV_DIR" ]; then
echo -e "\n----- There was a problem while initializing micromamba. Cannot continue. -----\n"
exit
fi
fi
rm -f micromamba
export PATH="$INSTALL_ENV_DIR/bin:$PATH"
# Download/unpack/clean up InvokeAI release sourceball
_err_msg="\n----- InvokeAI source download failed -----\n"
curl -L $RELEASE_URL/$RELEASE_SOURCEBALL --output InvokeAI.tgz
_err_exit $? _err_msg
_err_msg="\n----- InvokeAI source unpack failed -----\n"
tar -zxf InvokeAI.tgz
_err_exit $? _err_msg
rm -f InvokeAI.tgz
_err_msg="\n----- InvokeAI source copy failed -----\n"
cd InvokeAI-*
cp -r . ..
_err_exit $? _err_msg
cd ..
# cleanup
rm -rf InvokeAI-*/
rm -rf .dev_scripts/ .github/ docker-build/ tests/ requirements.in requirements-mkdocs.txt shell.nix
echo -e "\n***** Unpacked InvokeAI source *****\n"
# Download/unpack/clean up python-build-standalone
_err_msg="\n----- Python download failed -----\n"
curl -L $PYTHON_BUILD_STANDALONE_URL/$PYTHON_BUILD_STANDALONE --output python.tgz
_err_exit $? _err_msg
_err_msg="\n----- Python unpack failed -----\n"
tar -zxf python.tgz
_err_exit $? _err_msg
rm -f python.tgz
echo -e "\n***** Unpacked python-build-standalone *****\n"
# create venv
_err_msg="\n----- problem creating venv -----\n"
if [ "$OS_NAME" == "darwin" ]; then
# patch sysconfig so that extensions can build properly
# adapted from https://github.com/cashapp/hermit-packages/commit/fcba384663892f4d9cfb35e8639ff7a28166ee43
PYTHON_INSTALL_DIR="$(pwd)/python"
SYSCONFIG="$(echo python/lib/python*/_sysconfigdata_*.py)"
TMPFILE="$(mktemp)"
chmod +w "${SYSCONFIG}"
cp "${SYSCONFIG}" "${TMPFILE}"
sed "s,'/install,'${PYTHON_INSTALL_DIR},g" "${TMPFILE}" > "${SYSCONFIG}"
rm -f "${TMPFILE}"
fi
./python/bin/python3 -E -s -m venv .venv
_err_exit $? _err_msg
source .venv/bin/activate
echo -e "\n***** Created Python virtual environment *****\n"
# Print venv's Python version
_err_msg="\n----- problem calling venv's python -----\n"
echo -e "We're running under"
.venv/bin/python3 --version
_err_exit $? _err_msg
_err_msg="\n----- pip update failed -----\n"
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location --upgrade pip
_err_exit $? _err_msg
echo -e "\n***** Updated pip *****\n"
_err_msg="\n----- requirements file copy failed -----\n"
cp binary_installer/py3.10-${OS_NAME}-"${OS_ARCH}"-${CD}-reqs.txt requirements.txt
_err_exit $? _err_msg
_err_msg="\n----- main pip install failed -----\n"
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -r requirements.txt
_err_exit $? _err_msg
echo -e "\n***** Installed Python dependencies *****\n"
_err_msg="\n----- InvokeAI setup failed -----\n"
.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -e .
_err_exit $? _err_msg
echo -e "\n***** Installed InvokeAI *****\n"
cp binary_installer/invoke.sh.in ./invoke.sh
chmod a+rx ./invoke.sh
echo -e "\n***** Installed invoke launcher script ******\n"
# more cleanup
rm -rf binary_installer/ installer_files/
# preload the models
# set the message first so _err_exit tests the python exit status, not the assignment's
_err_msg="\n----- model download clone failed -----\n"
.venv/bin/python3 scripts/configure_invokeai.py
_err_exit $? _err_msg
deactivate
echo -e "\n***** Finished downloading models *****\n"
echo "All done! Run the command"
echo " $scriptdir/invoke.sh"
echo "to start InvokeAI."
read -p "Press any key to exit..."
exit

View File

@@ -1,36 +0,0 @@
@echo off
PUSHD "%~dp0"
call .venv\Scripts\activate.bat
echo Do you want to generate images using the
echo 1. command-line
echo 2. browser-based UI
echo OR
echo 3. open the developer console
set /p choice="Please enter 1, 2 or 3: "
if /i "%choice%" == "1" (
echo Starting the InvokeAI command-line.
.venv\Scripts\python scripts\invoke.py %*
) else if /i "%choice%" == "2" (
echo Starting the InvokeAI browser-based UI.
.venv\Scripts\python scripts\invoke.py --web %*
) else if /i "%choice%" == "3" (
echo Developer Console
echo Python command is:
where python
echo Python version is:
python --version
echo *************************
echo You are now in the system shell, with the local InvokeAI Python virtual environment activated,
echo so that you can troubleshoot this InvokeAI installation as necessary.
echo *************************
echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
call cmd /k
) else (
echo Invalid selection
pause
exit /b
)
deactivate

View File

@@ -1,46 +0,0 @@
#!/usr/bin/env sh
set -eu
. .venv/bin/activate
# set required env var for torch on mac MPS
if [ "$(uname -s)" == "Darwin" ]; then
export PYTORCH_ENABLE_MPS_FALLBACK=1
fi
echo "Do you want to generate images using the"
echo "1. command-line"
echo "2. browser-based UI"
echo "OR"
echo "3. open the developer console"
echo "Please enter 1, 2, or 3:"
read choice
case $choice in
1)
printf "\nStarting the InvokeAI command-line..\n";
.venv/bin/python scripts/invoke.py $*;
;;
2)
printf "\nStarting the InvokeAI browser-based UI..\n";
.venv/bin/python scripts/invoke.py --web $*;
;;
3)
printf "\nDeveloper Console:\n";
printf "Python command is:\n\t";
which python;
printf "Python version is:\n\t";
python --version;
echo "*************************"
echo "You are now in your user shell ($SHELL) with the local InvokeAI Python virtual environment activated,";
echo "so that you can troubleshoot this InvokeAI installation as necessary.";
printf "*************************\n"
echo "*** Type \`exit\` to quit this shell and deactivate the Python virtual environment *** ";
/usr/bin/env "$SHELL";
;;
*)
echo "Invalid selection";
exit
;;
esac

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,17 +0,0 @@
InvokeAI
Project homepage: https://github.com/invoke-ai/InvokeAI
Installation on Windows:
NOTE: You might need to enable Windows Long Paths. If you're not sure,
then you almost certainly need to. Simply double-click the 'WinLongPathsEnabled.reg'
file. Note that you will need to have admin privileges in order to
do this.
Please double-click the 'install.bat' file (while keeping it inside the invokeAI folder).
Installation on Linux and Mac:
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).
After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh'
file (on Linux/Mac) to start InvokeAI.

View File

@@ -1,33 +0,0 @@
--prefer-binary
--extra-index-url https://download.pytorch.org/whl/torch_stable.html
--extra-index-url https://download.pytorch.org/whl/cu116
--trusted-host https://download.pytorch.org
accelerate~=0.15
albumentations
diffusers[torch]~=0.11
einops
eventlet
flask_cors
flask_socketio
flaskwebgui==1.0.3
getpass_asterisk
imageio-ffmpeg
pyreadline3
realesrgan
send2trash
streamlit
taming-transformers-rom1504
test-tube
torch-fidelity
torch==1.12.1 ; platform_system == 'Darwin'
torch==1.12.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
torchvision==0.13.1 ; platform_system == 'Darwin'
torchvision==0.13.0+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
transformers
picklescan
https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip
https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip
https://github.com/invoke-ai/GFPGAN/archive/3f5d2397361199bc4a91c08bb7d80f04d7805615.zip ; platform_system=='Windows'
https://github.com/invoke-ai/GFPGAN/archive/c796277a1cf77954e5fc0b288d7062d162894248.zip ; platform_system=='Linux' or platform_system=='Darwin'
https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip
https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip

View File

@@ -43,7 +43,8 @@ socket_io = SocketIO(app)

 # initialize config
 # this is a module global
-app_config = InvokeAIAppConfig()
+app_config = InvokeAIAppConfig.get_config()
+app_config.parse_args()

 # Add startup event to load dependencies
 @app.on_event("startup")

View File

@@ -38,7 +38,7 @@ from .services.invocation_services import InvocationServices
 from .services.invoker import Invoker
 from .services.processor import DefaultInvocationProcessor
 from .services.sqlite import SqliteItemStorage
-from .services.config import get_invokeai_config
+from .services.config import InvokeAIAppConfig

 class CliCommand(BaseModel):
     command: Union[BaseCommand.get_commands() + BaseInvocation.get_invocations()] = Field(discriminator="type")  # type: ignore
@@ -197,7 +197,8 @@ logger = logger.InvokeAILogger.getLogger()

 def invoke_cli():
     # this gets the basic configuration
-    config = get_invokeai_config()
+    config = InvokeAIAppConfig.get_config()
+    config.parse_args()

     # get the optional list of invocations to execute on the command line
     parser = config.get_parser()

View File

@@ -51,18 +51,32 @@ in INVOKEAI_ROOT. You can replace supersede this by providing any
 OmegaConf dictionary object initialization time:

  omegaconf = OmegaConf.load('/tmp/init.yaml')
- conf = InvokeAIAppConfig(conf=omegaconf)
+ conf = InvokeAIAppConfig()
+ conf.parse_args(conf=omegaconf)

-By default, InvokeAIAppConfig will parse the contents of `sys.argv` at
-initialization time. You may pass a list of strings in the optional
+InvokeAIAppConfig.parse_args() will parse the contents of `sys.argv`
+at initialization time. You may pass a list of strings in the optional
 `argv` argument to use instead of the system argv:

- conf = InvokeAIAppConfig(arg=['--xformers_enabled'])
+ conf.parse_args(argv=['--xformers_enabled'])

-It is also possible to set a value at initialization time. This value
-has highest priority.
+It is also possible to set a value at initialization time. However, if
+you call parse_args() it may be overwritten.

  conf = InvokeAIAppConfig(xformers_enabled=True)
+ conf.parse_args(argv=['--no-xformers'])
+ conf.xformers_enabled
+ # False
+
+To avoid this, use `get_config()` to retrieve the application-wide
+configuration object. This will retain any properties set at object
+creation time:
+
+ conf = InvokeAIAppConfig.get_config(xformers_enabled=True)
+ conf.parse_args(argv=['--no-xformers'])
+ conf.xformers_enabled
+ # True

 Any setting can be overwritten by setting an environment variable of
 form: "INVOKEAI_<setting>", as in:
@@ -76,18 +90,23 @@ Order of precedence (from highest):
  4) config file options
  5) pydantic defaults

-Typical usage:
+Typical usage at the top level file:

  from invokeai.app.services.config import InvokeAIAppConfig
- from invokeai.invocations.generate import TextToImageInvocation

  # get global configuration and print its nsfw_checker value
- conf = InvokeAIAppConfig()
+ conf = InvokeAIAppConfig.get_config()
+ conf.parse_args()
+ print(conf.nsfw_checker)
+
+Typical usage in a backend module:
+
+ from invokeai.app.services.config import InvokeAIAppConfig
+
+ # get global configuration and print its nsfw_checker value
+ conf = InvokeAIAppConfig.get_config()
  print(conf.nsfw_checker)

- # get the text2image invocation and print its step value
- text2image = TextToImageInvocation()
- print(text2image.steps)

 Computed properties:
@@ -103,10 +122,11 @@ a Path object:
  lora_path - path to the LoRA directory

 In most cases, you will want to create a single InvokeAIAppConfig
-object for the entire application. The get_invokeai_config() function
+object for the entire application. The InvokeAIAppConfig.get_config() function
 does this:

- config = get_invokeai_config()
+ config = InvokeAIAppConfig.get_config()
+ config.parse_args()   # read values from the command line/config file
  print(config.root)

 # Subclassing
@@ -140,7 +160,9 @@ two configs are kept in separate sections of the config file:
     legacy_conf_dir: configs/stable-diffusion
     outdir: outputs
     ...

 '''
+from __future__ import annotations
+
 import argparse
 import pydoc
 import os
@@ -155,9 +177,6 @@ INIT_FILE = Path('invokeai.yaml')
 DB_FILE = Path('invokeai.db')
 LEGACY_INIT_FILE = Path('invokeai.init')

-# This global stores a singleton InvokeAIAppConfig configuration object
-global_config = None
-
 class InvokeAISettings(BaseSettings):
     '''
     Runtime configuration settings in which default values are
@@ -330,6 +349,9 @@ the command-line client (recommended for experts only), or
 can be changed by editing the file "INVOKEAI_ROOT/invokeai.yaml" or by
 setting environment variables INVOKEAI_<setting>.
     '''
+    singleton_config: ClassVar[InvokeAIAppConfig] = None
+    singleton_init: ClassVar[Dict] = None
+
     #fmt: off
     type: Literal["InvokeAI"] = "InvokeAI"
     host : str = Field(default="127.0.0.1", description="IP address to bind to", category='Web Server')
@@ -376,18 +398,17 @@ setting environment variables INVOKEAI_<setting>.
     log_level : Literal[tuple(["debug","info","warning","error","critical"])] = Field(default="debug", description="Emit logging messages at this level or higher", category="Logging")
     #fmt: on

-    def __init__(self, conf: DictConfig = None, argv: List[str]=None, **kwargs):
+    def parse_args(self, argv: List[str]=None, conf: DictConfig = None, clobber=False):
         '''
-        Initialize InvokeAIAppconfig.
+        Update settings with contents of init file, environment, and
+        command-line settings.
         :param conf: alternate Omegaconf dictionary object
         :param argv: aternate sys.argv list
-        :param **kwargs: attributes to initialize with
+        :param clobber: ovewrite any initialization parameters passed during initialization
         '''
-        super().__init__(**kwargs)
-
         # Set the runtime root directory. We parse command-line switches here
         # in order to pick up the --root_dir option.
-        self.parse_args(argv)
+        super().parse_args(argv)
         if conf is None:
             try:
                 conf = OmegaConf.load(self.root_dir / INIT_FILE)
@@ -396,12 +417,24 @@ setting environment variables INVOKEAI_<setting>.
         InvokeAISettings.initconf = conf

         # parse args again in order to pick up settings in configuration file
-        self.parse_args(argv)
+        super().parse_args(argv)

-        # restore initialization values
-        hints = get_type_hints(self)
-        for k in kwargs:
-            setattr(self,k,parse_obj_as(hints[k],kwargs[k]))
+        if self.singleton_init and not clobber:
+            hints = get_type_hints(self.__class__)
+            for k in self.singleton_init:
+                setattr(self,k,parse_obj_as(hints[k],self.singleton_init[k]))
+
+    @classmethod
+    def get_config(cls,**kwargs)->InvokeAIAppConfig:
+        '''
+        This returns a singleton InvokeAIAppConfig configuration object.
+        '''
+        if cls.singleton_config is None \
+           or type(cls.singleton_config)!=cls \
+           or (kwargs and cls.singleton_init != kwargs):
+            cls.singleton_config = cls(**kwargs)
+            cls.singleton_init = kwargs
+        return cls.singleton_config

     @property
     def root_path(self)->Path:
@@ -541,11 +574,8 @@ class PagingArgumentParser(argparse.ArgumentParser):
         text = self.format_help()
         pydoc.pager(text)

-def get_invokeai_config(cls:Type[InvokeAISettings]=InvokeAIAppConfig,**kwargs)->InvokeAIAppConfig:
+def get_invokeai_config(**kwargs)->InvokeAIAppConfig:
     '''
-    This returns a singleton InvokeAIAppConfig configuration object.
+    Legacy function which returns InvokeAIAppConfig.get_config()
     '''
-    global global_config
-    if global_config is None or type(global_config)!=cls:
-        global_config = cls(**kwargs)
-    return global_config
+    return InvokeAIAppConfig.get_config(**kwargs)
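
The net effect of this file's changes is that constructing the settings object, parsing arguments, and obtaining the shared instance are now three distinct steps. A minimal sketch of the resulting call pattern, based only on the docstring and the get_config() logic shown above (the --no-xformers flag and xformers_enabled field are the docstring's own examples):

    from invokeai.app.services.config import InvokeAIAppConfig

    # First call creates the singleton and records any creation-time overrides.
    conf = InvokeAIAppConfig.get_config(xformers_enabled=True)

    # parse_args() layers init-file, environment, and command-line values on
    # top, then (absent clobber=True) re-applies the creation-time overrides,
    # so xformers_enabled stays True despite --no-xformers.
    conf.parse_args(argv=['--no-xformers'])
    assert conf.xformers_enabled

    # Later callers anywhere in the app receive the same object.
    assert InvokeAIAppConfig.get_config() is conf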

View File

@@ -26,7 +26,6 @@ class SqliteItemStorage(ItemStorageABC, Generic[T]):
         self._table_name = table_name
         self._id_field = id_field  # TODO: validate that T has this field
         self._lock = Lock()
-
         self._conn = sqlite3.connect(
             self._filename, check_same_thread=False
         )  # TODO: figure out a better threading solution

View File

@@ -6,7 +6,8 @@ be suppressed or deferred
 """
 import numpy as np

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig
+config = InvokeAIAppConfig.get_config()

 class PatchMatch:
     """
@@ -21,7 +22,6 @@ class PatchMatch:
     @classmethod
     def _load_patch_match(self):
-        config = get_invokeai_config()
         if self.tried_load:
             return
         if config.try_patchmatch:

View File

@@ -33,10 +33,11 @@ from PIL import Image, ImageOps
 from transformers import AutoProcessor, CLIPSegForImageSegmentation

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig

 CLIPSEG_MODEL = "CIDAS/clipseg-rd64-refined"
 CLIPSEG_SIZE = 352
+config = InvokeAIAppConfig.get_config()

 class SegmentedGrayscale(object):
     def __init__(self, image: Image, heatmap: torch.Tensor):
@@ -83,7 +84,6 @@ class Txt2Mask(object):
     def __init__(self, device="cpu", refined=False):
         logger.info("Initializing clipseg model for text to mask inference")
-        config = get_invokeai_config()

         # BUG: we are not doing anything with the device option at this time
         self.device = device

View File

@@ -55,6 +55,8 @@ from invokeai.backend.install.model_install_backend import (
     UserSelections,
 )

+from invokeai.app.services.config import InvokeAIAppConfig
+
 warnings.filterwarnings("ignore")
 transformers.logging.set_verbosity_error()
@@ -62,7 +64,7 @@ transformers.logging.set_verbosity_error()

 # --------------------------globals-----------------------
-config = get_invokeai_config(argv=[])
+config = InvokeAIAppConfig.get_config()

 Model_dir = "models"
 Weights_dir = "ldm/stable-diffusion-v1/"
@@ -301,7 +303,7 @@ def download_vaes():
         if not hf_download_with_resume(
             repo_id=repo_id,
             model_name=model_name,
-            model_dir=str(config.root / Model_dir / Weights_dir),
+            model_dir=str(config.root_path / Model_dir / Weights_dir),
         ):
             raise Exception(f"download of {model_name} failed")
     except Exception as e:
@@ -326,7 +328,7 @@ class editOptsForm(npyscreen.FormMultiPage):
     def create(self):
         program_opts = self.parentApp.program_opts
         old_opts = self.parentApp.invokeai_opts
-        first_time = not (config.root / 'invokeai.yaml').exists()
+        first_time = not (config.root_path / 'invokeai.yaml').exists()
         access_token = HfFolder.get_token()
         window_width, window_height = get_terminal_size()
         for i in [
@@ -641,7 +643,7 @@ def edit_opts(program_opts: Namespace, invokeai_opts: Namespace) -> argparse.Nam
 def default_startup_options(init_file: Path) -> Namespace:
-    opts = InvokeAIAppConfig(argv=[])
+    opts = InvokeAIAppConfig.get_config()
     if not init_file.exists():
         opts.nsfw_checker = True
     return opts
@@ -709,10 +711,10 @@ def write_opts(opts: Namespace, init_file: Path):
     """
     Update the invokeai.yaml file with values from current settings.
     """
-    # this will load default settings
-    new_config = InvokeAIAppConfig(argv=[])
+    # this will load current settings
+    new_config = InvokeAIAppConfig.get_config()
     new_config.root = config.root

     for key,value in opts.__dict__.items():
         if hasattr(new_config,key):
             setattr(new_config,key,value)
@@ -722,19 +724,19 @@ def write_opts(opts: Namespace, init_file: Path):

 # -------------------------------------
 def default_output_dir() -> Path:
-    return config.root / "outputs"
+    return config.root_path / "outputs"

 # -------------------------------------
 def default_embedding_dir() -> Path:
-    return config.root / "embeddings"
+    return config.root_path / "embeddings"

 # -------------------------------------
 def default_lora_dir() -> Path:
-    return config.root / "loras"
+    return config.root_path / "loras"

 # -------------------------------------
 def default_controlnet_dir() -> Path:
-    return config.root / "controlnets"
+    return config.root_path / "controlnets"

 # -------------------------------------
 def write_default_options(program_opts: Namespace, initfile: Path):
@@ -748,7 +750,7 @@ def write_default_options(program_opts: Namespace, initfile: Path):
 # yaml format.
 def migrate_init_file(legacy_format:Path):
     old = legacy_parser.parse_args([f'@{str(legacy_format)}'])
-    new = InvokeAIAppConfig(conf={})
+    new = InvokeAIAppConfig.get_config()

     fields = list(get_type_hints(InvokeAIAppConfig).keys())
     for attr in fields:
@@ -840,7 +842,8 @@ def main():
         if old_init_file.exists() and not new_init_file.exists():
             print('** Migrating invokeai.init to invokeai.yaml')
             migrate_init_file(old_init_file)
-            config.parse_args([]) # reread defaults
+            # Load new init file into config
+            config.parse_args(argv=[],conf=OmegaConf.load(new_init_file))

         if not config.model_conf_path.exists():
             initialize_rootdir(config.root, opt.yes_to_all)
@@ -877,7 +880,6 @@ def main():
         if opt.skip_sd_weights:
             print("\n** SKIPPING DIFFUSION WEIGHTS DOWNLOAD PER USER REQUEST **")
         elif models_to_download:
-            print(models_to_download)
             print("\n** DOWNLOADING DIFFUSION WEIGHTS **")
             process_and_execute(opt, models_to_download)

View File

@@ -20,14 +20,16 @@ from tqdm import tqdm

 import invokeai.configs as configs

-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig
 from ..stable_diffusion import StableDiffusionGeneratorPipeline
 from ..util.logging import InvokeAILogger

 warnings.filterwarnings("ignore")

 # --------------------------globals-----------------------
-config = get_invokeai_config(argv=[])
+config = InvokeAIAppConfig.get_config()

 Model_dir = "models"
 Weights_dir = "ldm/stable-diffusion-v1/"

View File

@@ -26,7 +26,7 @@ import torch
 from safetensors.torch import load_file

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig

 from .model_manager import ModelManager, SDLegacyType
@@ -842,7 +842,7 @@ def convert_ldm_bert_checkpoint(checkpoint, config):

 def convert_ldm_clip_checkpoint(checkpoint):
     text_model = CLIPTextModel.from_pretrained(
-        "openai/clip-vit-large-patch14", cache_dir=get_invokeai_config().cache_dir
+        "openai/clip-vit-large-patch14", cache_dir=InvokeAIAppConfig.get_config().cache_dir
     )
     keys = list(checkpoint.keys())
@@ -897,7 +897,7 @@ textenc_pattern = re.compile("|".join(protected.keys()))

 def convert_paint_by_example_checkpoint(checkpoint):
-    cache_dir = get_invokeai_config().cache_dir
+    cache_dir = InvokeAIAppConfig.get_config().cache_dir
     config = CLIPVisionConfig.from_pretrained(
         "openai/clip-vit-large-patch14", cache_dir=cache_dir
     )
@@ -969,7 +969,7 @@ def convert_paint_by_example_checkpoint(checkpoint):

 def convert_open_clip_checkpoint(checkpoint):
-    cache_dir = get_invokeai_config().cache_dir
+    cache_dir = InvokeAIAppConfig.get_config().cache_dir
     text_model = CLIPTextModel.from_pretrained(
         "stabilityai/stable-diffusion-2", subfolder="text_encoder", cache_dir=cache_dir
     )
@@ -1092,8 +1092,9 @@ def load_pipeline_from_original_stable_diffusion_ckpt(
     :param vae: A diffusers VAE to load into the pipeline.
     :param vae_path: Path to a checkpoint VAE that will be converted into diffusers and loaded into the pipeline.
     """
-    invoke_config = get_invokeai_config()
+    config = InvokeAIAppConfig.get_config()
     cache_dir = invoke_config.cache_dir
+
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
         verbosity = dlogging.get_verbosity()

View File

@@ -49,7 +49,7 @@ from diffusers.pipelines.stable_diffusion.safety_checker import (
 from ..stable_diffusion import (
     StableDiffusionGeneratorPipeline,
 )
-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig
 from ..install.model_install_backend import (
     Dataset_path,
     hf_download_with_resume,
@@ -104,7 +104,7 @@ class ModelManager(object):
         if not isinstance(config, DictConfig):
             config = OmegaConf.load(config)
         self.config = config
-        self.globals = get_invokeai_config()
+        self.globals = InvokeAIAppConfig.get_config()
         self.precision = precision
         self.device = torch.device(device_type)
         self.max_loaded_models = max_loaded_models
@@ -1063,7 +1063,7 @@ class ModelManager(object):
         """
         # Three transformer models to check: bert, clip and safety checker, and
         # the diffusers as well
-        config = get_invokeai_config()
+        config = InvokeAIAppConfig.get_config()
         models_dir = config.root_dir / "models"
         legacy_locations = [
             Path(
@@ -1293,7 +1293,7 @@ class ModelManager(object):
     @classmethod
     def _delete_model_from_cache(cls,repo_id):
-        cache_info = scan_cache_dir(get_invokeai_config().cache_dir)
+        cache_info = scan_cache_dir(InvokeAIAppConfig.get_config().cache_dir)

         # I'm sure there is a way to do this with comprehensions
         # but the code quickly became incomprehensible!
@@ -1310,7 +1310,7 @@ class ModelManager(object):
     @staticmethod
     def _abs_path(path: str | Path) -> Path:
-        globals = get_invokeai_config()
+        globals = InvokeAIAppConfig.get_config()
         if path is None or Path(path).is_absolute():
             return path
         return Path(globals.root_dir, path).resolve()

View File

@@ -21,10 +21,12 @@ from compel.prompt_parser import (
 import invokeai.backend.util.logging as logger

-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig
 from ..stable_diffusion import InvokeAIDiffuserComponent
 from ..util import torch_dtype

+config = InvokeAIAppConfig.get_config()
+
 def get_uc_and_c_and_ec(prompt_string,
                         model: InvokeAIDiffuserComponent,
                         log_tokens=False, skip_normalize_legacy_blend=False):
@@ -37,9 +39,7 @@ def get_uc_and_c_and_ec(prompt_string,
         textual_inversion_manager=model.textual_inversion_manager,
         dtype_for_device_getter=torch_dtype,
         truncate_long_prompts=False,
     )

-    config = get_invokeai_config()
-
     # get rid of any newline characters
     prompt_string = prompt_string.replace("\n", " ")

View File

@@ -6,7 +6,7 @@ import numpy as np
 import torch

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig

 pretrained_model_url = (
     "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth"
@@ -18,7 +18,7 @@ class CodeFormerRestoration:
         self, codeformer_dir="models/codeformer", codeformer_model_path="codeformer.pth"
     ) -> None:
-        self.globals = get_invokeai_config()
+        self.globals = InvokeAIAppConfig.get_config()
         codeformer_dir = self.globals.root_dir / codeformer_dir
         self.model_path = codeformer_dir / codeformer_model_path
         self.codeformer_model_exists = self.model_path.exists()

View File

@@ -7,11 +7,11 @@ import torch
 from PIL import Image

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig

 class GFPGAN:
     def __init__(self, gfpgan_model_path="models/gfpgan/GFPGANv1.4.pth") -> None:
-        self.globals = get_invokeai_config()
+        self.globals = InvokeAIAppConfig.get_config()
         if not os.path.isabs(gfpgan_model_path):
             gfpgan_model_path = self.globals.root_dir / gfpgan_model_path
         self.model_path = gfpgan_model_path

View File

@@ -6,8 +6,8 @@ from PIL import Image
 from PIL.Image import Image as ImageType

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig

-config = get_invokeai_config()
+config = InvokeAIAppConfig.get_config()

 class ESRGAN:
     def __init__(self, bg_tile_size=400) -> None:

View File

@@ -15,9 +15,11 @@ from transformers import AutoFeatureExtractor

 import invokeai.assets.web as web_assets
 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig
 from .util import CPU_DEVICE

+config = InvokeAIAppConfig.get_config()
+
 class SafetyChecker(object):
     CAUTION_IMG = "caution.png"
@@ -26,7 +28,6 @@ class SafetyChecker(object):
         caution = Image.open(path)
         self.caution_img = caution.resize((caution.width // 2, caution.height // 2))
         self.device = device
-        config = get_invokeai_config()

         try:
             safety_model_id = "CompVis/stable-diffusion-safety-checker"

View File

@@ -17,15 +17,16 @@ from huggingface_hub import (
     hf_hub_url,
 )

-import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import get_invokeai_config
+from invokeai.backend.util.logging import InvokeAILogger
+from invokeai.app.services.config import InvokeAIAppConfig

+logger = InvokeAILogger.getLogger()

 class HuggingFaceConceptsLibrary(object):
     def __init__(self, root=None):
         """
         Initialize the Concepts object. May optionally pass a root directory.
         """
-        self.config = get_invokeai_config()
+        self.config = InvokeAIAppConfig.get_config()
         self.root = root or self.config.root
         self.hf_api = HfApi()
         self.local_concepts = dict()

View File

@@ -40,7 +40,7 @@ from torchvision.transforms.functional import resize as tv_resize
 from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
 from typing_extensions import ParamSpec

-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig
 from ..util import CPU_DEVICE, normalize_device
 from .diffusion import (
     AttentionMapSaver,
@@ -364,7 +364,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
         """
         if xformers is available, use it, otherwise use sliced attention.
         """
-        config = get_invokeai_config()
+        config = InvokeAIAppConfig.get_config()
         if (
             torch.cuda.is_available()
             and is_xformers_available()

View File

@@ -10,7 +10,7 @@ from diffusers.models.attention_processor import AttentionProcessor
 from typing_extensions import TypeAlias

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig

 from .cross_attention_control import (
     Arguments,
@@ -72,7 +72,7 @@ class InvokeAIDiffuserComponent:
         :param model: the unet model to pass through to cross attention control
         :param model_forward_callback: a lambda with arguments (x, sigma, conditioning_to_apply). will be called repeatedly. most likely, this should simply call model.forward(x, sigma, conditioning)
         """
-        config = get_invokeai_config()
+        config = InvokeAIAppConfig.get_config()
         self.conditioning = None
         self.model = model
         self.is_running_diffusers = is_running_diffusers
@@ -112,23 +112,25 @@ class InvokeAIDiffuserComponent:
         # TODO resuscitate attention map saving
         # self.remove_attention_map_saving()

-    def override_cross_attention(
-        self, conditioning: ExtraConditioningInfo, step_count: int
-    ) -> Dict[str, AttentionProcessor]:
-        """
-        setup cross attention .swap control. for diffusers this replaces the attention processor, so
-        the previous attention processor is returned so that the caller can restore it later.
-        """
-        self.conditioning = conditioning
-        self.cross_attention_control_context = Context(
-            arguments=self.conditioning.cross_attention_control_args,
-            step_count=step_count,
-        )
-        return override_cross_attention(
-            self.model,
-            self.cross_attention_control_context,
-            is_running_diffusers=self.is_running_diffusers,
-        )
+    # apparently unused code
+    # TODO: delete
+    # def override_cross_attention(
+    #     self, conditioning: ExtraConditioningInfo, step_count: int
+    # ) -> Dict[str, AttentionProcessor]:
+    #     """
+    #     setup cross attention .swap control. for diffusers this replaces the attention processor, so
+    #     the previous attention processor is returned so that the caller can restore it later.
+    #     """
+    #     self.conditioning = conditioning
+    #     self.cross_attention_control_context = Context(
+    #         arguments=self.conditioning.cross_attention_control_args,
+    #         step_count=step_count,
+    #     )
+    #     return override_cross_attention(
+    #         self.model,
+    #         self.cross_attention_control_context,
+    #         is_running_diffusers=self.is_running_diffusers,
+    #     )

     def restore_default_cross_attention(
         self, restore_attention_processor: Optional["AttentionProcessor"] = None

View File

@@ -88,7 +88,7 @@ def save_progress(

 def parse_args():
-    config = InvokeAIAppConfig(argv=[])
+    config = InvokeAIAppConfig.get_config()
     parser = PagingArgumentParser(
         description="Textual inversion training"
     )

View File

@@ -4,15 +4,15 @@ from contextlib import nullcontext
 import torch
 from torch import autocast

-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig

 CPU_DEVICE = torch.device("cpu")
 CUDA_DEVICE = torch.device("cuda")
 MPS_DEVICE = torch.device("mps")
+config = InvokeAIAppConfig.get_config()

 def choose_torch_device() -> torch.device:
     """Convenience routine for guessing which GPU device to run model on"""
-    config = get_invokeai_config()
     if config.always_use_cpu:
         return CPU_DEVICE
     if torch.cuda.is_available():
@@ -32,7 +32,6 @@ def choose_precision(device: torch.device) -> str:

 def torch_dtype(device: torch.device) -> torch.dtype:
-    config = get_invokeai_config()
     if config.full_precision:
         return torch.float32
     if choose_precision(device) == "float16":
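
With this change the device helpers consult one module-level config object instead of fetching it on every call. A short usage sketch under that assumption; the import path is inferred from the relative imports elsewhere in this commit and may differ:

    from invokeai.backend.util.devices import choose_torch_device, torch_dtype

    device = choose_torch_device()  # CPU_DEVICE whenever config.always_use_cpu is set
    dtype = torch_dtype(device)     # torch.float32 whenever config.full_precision is set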

View File

@@ -52,13 +52,13 @@ from invokeai.frontend.install.widgets import (
     set_min_terminal_size,
     select_stable_diffusion_config_file,
 )
-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig

 # minimum size for the UI
 MIN_COLS = 140
 MIN_LINES = 50

-config = get_invokeai_config()
+config = InvokeAIAppConfig.get_config()

 # build a table mapping all non-printable characters to None
 # for stripping control characters
@@ -679,7 +679,6 @@ class AddModelApplication(npyscreen.NPSAppManaged):
         self.user_selections = UserSelections()

     def onStart(self):
-        print('here i am')
         npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
         self.main_form = self.addForm(
             "MAIN", addModelsForm, name="Install Stable Diffusion Models", cycle_widgets=True,

View File

@@ -20,12 +20,12 @@ from npyscreen import widget
 from omegaconf import OmegaConf

 import invokeai.backend.util.logging as logger
-from invokeai.services.config import get_invokeai_config
+from invokeai.services.config import InvokeAIAppConfig
 from ...backend.model_management import ModelManager
 from ...frontend.install.widgets import FloatTitleSlider

 DEST_MERGED_MODEL_DIR = "merged_models"
-config = get_invokeai_config()
+config = InvokeAIAppConfig.get_config()

 def merge_diffusion_models(
     model_ids_or_paths: List[Union[str, Path]],

View File

@@ -22,7 +22,7 @@ from omegaconf import OmegaConf

 import invokeai.backend.util.logging as logger
-from invokeai.app.services.config import get_invokeai_config
+from invokeai.app.services.config import InvokeAIAppConfig
 from ...backend.training import (
     do_textual_inversion_training,
     parse_args
@@ -423,7 +423,7 @@ def do_front_end(args: Namespace):
     save_args(args)
     try:
-        do_textual_inversion_training(get_invokeai_config(),**args)
+        do_textual_inversion_training(InvokeAIAppConfig.get_config(),**args)
         copy_to_embeddings_folder(args)
     except Exception as e:
         logger.error("An exception occurred during training. The exception was:")
@@ -436,7 +436,7 @@ def main():
     global config
     args = parse_args()
-    config = get_invokeai_config(argv=[])
+    config = InvokeAIAppConfig.get_config()

     # change root if needed
     if args.root_dir:

View File

@@ -46,9 +46,12 @@ const ImageDndContext = (props: ImageDndContextProps) => {
   const touchSensor = useSensor(TouchSensor, {
     activationConstraint: { distance: 15 },
   });
-  const keyboardSensor = useSensor(KeyboardSensor);
+  // TODO: Use KeyboardSensor - needs composition of multiple collisionDetection algos
+  // Alternatively, fix `rectIntersection` collection detection to work with the drag overlay
+  // (currently the drag element collision rect is not correctly calculated)
+  // const keyboardSensor = useSensor(KeyboardSensor);

-  const sensors = useSensors(mouseSensor, touchSensor, keyboardSensor);
+  const sensors = useSensors(mouseSensor, touchSensor);

   return (
     <DndContext

View File

@@ -1,23 +1,17 @@
-import {
-  Box,
-  Flex,
-  Icon,
-  IconButtonProps,
-  Image,
-  Text,
-} from '@chakra-ui/react';
+import { Box, Flex, Icon, IconButtonProps, Image } from '@chakra-ui/react';
 import { useDraggable, useDroppable } from '@dnd-kit/core';
 import { useCombinedRefs } from '@dnd-kit/utilities';
 import IAIIconButton from 'common/components/IAIIconButton';
 import { IAIImageFallback } from 'common/components/IAIImageFallback';
 import ImageMetadataOverlay from 'common/components/ImageMetadataOverlay';
 import { useGetUrl } from 'common/util/getUrl';
-import { AnimatePresence, motion } from 'framer-motion';
+import { AnimatePresence } from 'framer-motion';
 import { ReactElement, SyntheticEvent } from 'react';
 import { memo, useRef } from 'react';
 import { FaImage, FaTimes } from 'react-icons/fa';
 import { ImageDTO } from 'services/api';
 import { v4 as uuidv4 } from 'uuid';
+import IAIDropOverlay from './IAIDropOverlay';

 type IAIDndImageProps = {
   image: ImageDTO | null | undefined;
@@ -138,7 +132,7 @@ const IAIDndImage = (props: IAIDndImageProps) => {
           </Box>
         )}
         <AnimatePresence>
-          {active && <DropOverlay isOver={isOver} />}
+          {active && <IAIDropOverlay isOver={isOver} />}
         </AnimatePresence>
       </Flex>
     )}
@@ -164,7 +158,7 @@ const IAIDndImage = (props: IAIDndImageProps) => {
           />
         </Flex>
         <AnimatePresence>
-          {active && <DropOverlay isOver={isOver} />}
+          {active && <IAIDropOverlay isOver={isOver} />}
         </AnimatePresence>
       </>
     )}
@@ -173,86 +167,3 @@ const IAIDndImage = (props: IAIDndImageProps) => {
 };

 export default memo(IAIDndImage);
-
-type DropOverlayProps = {
-  isOver: boolean;
-};
-
-const DropOverlay = (props: DropOverlayProps) => {
-  const { isOver } = props;
-  return (
-    <motion.div
-      key="statusText"
-      initial={{
-        opacity: 0,
-      }}
-      animate={{
-        opacity: 1,
-        transition: { duration: 0.1 },
-      }}
-      exit={{
-        opacity: 0,
-        transition: { duration: 0.1 },
-      }}
-    >
-      <Flex
-        sx={{
-          position: 'absolute',
-          top: 0,
-          left: 0,
-          w: 'full',
-          h: 'full',
-        }}
-      >
-        <Flex
-          sx={{
-            position: 'absolute',
-            top: 0,
-            left: 0,
-            w: 'full',
-            h: 'full',
-            bg: 'base.900',
-            opacity: 0.7,
-            borderRadius: 'base',
-            alignItems: 'center',
-            justifyContent: 'center',
-            transitionProperty: 'common',
-            transitionDuration: '0.1s',
-          }}
-        />
-        <Flex
-          sx={{
-            position: 'absolute',
-            top: 0,
-            left: 0,
-            w: 'full',
-            h: 'full',
-            opacity: 1,
-            borderWidth: 2,
-            borderColor: isOver ? 'base.200' : 'base.500',
-            borderRadius: 'base',
-            borderStyle: 'dashed',
-            transitionProperty: 'common',
-            transitionDuration: '0.1s',
-            alignItems: 'center',
-            justifyContent: 'center',
-          }}
-        >
-          <Text
-            sx={{
-              fontSize: '2xl',
-              fontWeight: 600,
-              transform: isOver ? 'scale(1.1)' : 'scale(1)',
-              color: isOver ? 'base.100' : 'base.500',
-              transitionProperty: 'common',
-              transitionDuration: '0.1s',
-            }}
-          >
-            Drop
-          </Text>
-        </Flex>
-      </Flex>
-    </motion.div>
-  );
-};
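
The imports this file keeps (useDraggable, useDroppable, useCombinedRefs) are what let a single image act as both drag source and drop target. A rough sketch of that pattern follows, with illustrative ids and markup rather than IAIDndImage's real render tree.

    import { useDraggable, useDroppable } from '@dnd-kit/core';
    import { useCombinedRefs } from '@dnd-kit/utilities';

    const DraggableDroppableExample = (props: { id: string }) => {
      const { attributes, listeners, setNodeRef: setDraggableRef } = useDraggable({
        id: `drag_${props.id}`,
      });
      const { isOver, setNodeRef: setDroppableRef } = useDroppable({
        id: `drop_${props.id}`,
      });
      // Merge both refs so one DOM node registers with both systems.
      const ref = useCombinedRefs(setDraggableRef, setDroppableRef);
      return (
        <div ref={ref} {...listeners} {...attributes}>
          {isOver ? 'Release to drop' : 'Drag me'}
        </div>
      );
    };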

View File

@@ -0,0 +1,91 @@
+import { Flex, Text } from '@chakra-ui/react';
+import { motion } from 'framer-motion';
+import { memo, useRef } from 'react';
+import { v4 as uuidv4 } from 'uuid';
+
+type Props = {
+  isOver: boolean;
+  label?: string;
+};
+
+export const IAIDropOverlay = (props: Props) => {
+  const { isOver, label = 'Drop' } = props;
+  const motionId = useRef(uuidv4());
+  return (
+    <motion.div
+      key={motionId.current}
+      initial={{
+        opacity: 0,
+      }}
+      animate={{
+        opacity: 1,
+        transition: { duration: 0.1 },
+      }}
+      exit={{
+        opacity: 0,
+        transition: { duration: 0.1 },
+      }}
+    >
+      <Flex
+        sx={{
+          position: 'absolute',
+          top: 0,
+          left: 0,
+          w: 'full',
+          h: 'full',
+        }}
+      >
+        <Flex
+          sx={{
+            position: 'absolute',
+            top: 0,
+            left: 0,
+            w: 'full',
+            h: 'full',
+            bg: 'base.900',
+            opacity: 0.7,
+            borderRadius: 'base',
+            alignItems: 'center',
+            justifyContent: 'center',
+            transitionProperty: 'common',
+            transitionDuration: '0.1s',
+          }}
+        />
+        <Flex
+          sx={{
+            position: 'absolute',
+            top: 0,
+            left: 0,
+            w: 'full',
+            h: 'full',
+            opacity: 1,
+            borderWidth: 2,
+            borderColor: isOver ? 'base.200' : 'base.500',
+            borderRadius: 'base',
+            borderStyle: 'dashed',
+            transitionProperty: 'common',
+            transitionDuration: '0.1s',
+            alignItems: 'center',
+            justifyContent: 'center',
+          }}
+        >
+          <Text
+            sx={{
+              fontSize: '2xl',
+              fontWeight: 600,
+              transform: isOver ? 'scale(1.1)' : 'scale(1)',
+              color: isOver ? 'base.100' : 'base.500',
+              transitionProperty: 'common',
+              transitionDuration: '0.1s',
+            }}
+          >
+            {label}
+          </Text>
+        </Flex>
+      </Flex>
+    </motion.div>
+  );
+};
+
+export default memo(IAIDropOverlay);
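
As the IAIDndImage hunks earlier show, this overlay is rendered conditionally inside framer-motion's AnimatePresence, which is what allows the exit animation above to play after the drag ends. A minimal usage sketch, with illustrative prop plumbing:

    import { AnimatePresence } from 'framer-motion';
    import IAIDropOverlay from 'common/components/IAIDropOverlay';

    type Props = { active: boolean; isOver: boolean };

    // `active` and `isOver` would come from dnd-kit's useDroppable in real code.
    const DropTargetFrame = ({ active, isOver }: Props) => (
      <AnimatePresence>
        {active && <IAIDropOverlay isOver={isOver} label="Drop here" />}
      </AnimatePresence>
    );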

View File

@@ -30,6 +30,7 @@ import {
 } from './canvasTypes';
 import { ImageDTO } from 'services/api';
 import { sessionCanceled } from 'services/thunks/session';
+import { setShouldUseCanvasBetaLayout } from 'features/ui/store/uiSlice';

 export const initialLayerState: CanvasLayerState = {
   objects: [],
@@ -851,6 +852,10 @@ export const canvasSlice = createSlice({
         state.layerState.stagingArea = initialLayerState.stagingArea;
       }
     });
+
+    builder.addCase(setShouldUseCanvasBetaLayout, (state, action) => {
+      state.doesCanvasNeedScaling = true;
+    });
   },
 });
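
The builder.addCase addition is standard Redux Toolkit extraReducers usage: the canvas slice subscribes to an action owned by the ui slice. A stripped-down sketch of that cross-slice pattern, with stand-in slice and action names:

    import { createAction, createSlice } from '@reduxjs/toolkit';

    // Stand-in for an action creator exported by another slice,
    // e.g. setShouldUseCanvasBetaLayout from uiSlice.
    const layoutChanged = createAction<boolean>('ui/layoutChanged');

    const canvasExampleSlice = createSlice({
      name: 'canvasExample',
      initialState: { doesCanvasNeedScaling: false },
      reducers: {},
      extraReducers: (builder) => {
        // When the layout toggles, flag the canvas for a rescale.
        builder.addCase(layoutChanged, (state) => {
          state.doesCanvasNeedScaling = true;
        });
      },
    });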

View File

@@ -60,7 +60,10 @@ const ControlNetImagePreview = (props: Props) => {
     processorType !== 'none';

   return (
-    <Box ref={containerRef} sx={{ position: 'relative', w: 'full', h: 'full' }}>
+    <Box
+      ref={containerRef}
+      sx={{ position: 'relative', w: 'full', h: 'full', aspectRatio: '1/1' }}
+    >
       <IAIDndImage
         image={controlImage}
         onDrop={handleDrop}

View File

@@ -51,6 +51,7 @@ const CurrentImageDisplay = () => {
           alignItems: 'center',
           justifyContent: 'center',
           gap: 4,
+          position: 'absolute',
         }}
       >
         <CurrentImagePreview />

View File

@@ -72,9 +72,10 @@ const InitialImagePreview = () => {
       sx={{
         width: 'full',
         height: 'full',
-        position: 'relative',
+        position: 'absolute',
         alignItems: 'center',
         justifyContent: 'center',
+        p: 4,
       }}
     >
       <IAIDndImage

View File

@@ -1,72 +0,0 @@
-import { createSelector } from '@reduxjs/toolkit';
-// import IAICanvas from 'features/canvas/components/IAICanvas';
-import { Box, Flex } from '@chakra-ui/react';
-import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
-import IAICanvas from 'features/canvas/components/IAICanvas';
-import IAICanvasResizer from 'features/canvas/components/IAICanvasResizer';
-import { canvasSelector } from 'features/canvas/store/canvasSelectors';
-import { isEqual } from 'lodash-es';
-import { useLayoutEffect } from 'react';
-import UnifiedCanvasToolbarBeta from './UnifiedCanvasToolbarBeta';
-import UnifiedCanvasToolSettingsBeta from './UnifiedCanvasToolSettingsBeta';
-import { requestCanvasRescale } from 'features/canvas/store/thunks/requestCanvasScale';
-
-const selector = createSelector(
-  [canvasSelector],
-  (canvas) => {
-    const { doesCanvasNeedScaling } = canvas;
-    return {
-      doesCanvasNeedScaling,
-    };
-  },
-  {
-    memoizeOptions: {
-      resultEqualityCheck: isEqual,
-    },
-  }
-);
-
-const UnifiedCanvasContentBeta = () => {
-  const dispatch = useAppDispatch();
-  const { doesCanvasNeedScaling } = useAppSelector(selector);
-
-  useLayoutEffect(() => {
-    dispatch(requestCanvasRescale());
-    const resizeCallback = () => {
-      dispatch(requestCanvasRescale());
-    };
-    window.addEventListener('resize', resizeCallback);
-    return () => window.removeEventListener('resize', resizeCallback);
-  }, [dispatch]);
-
-  return (
-    <Box
-      sx={{
-        width: '100%',
-        height: '100%',
-        borderRadius: 'base',
-        bg: 'base.850',
-      }}
-    >
-      <Flex
-        flexDirection="row"
-        width="100%"
-        height="100%"
-        columnGap={4}
-        padding={4}
-      >
-        <UnifiedCanvasToolbarBeta />
-        <Flex width="100%" height="100%" flexDirection="column" rowGap={4}>
-          <UnifiedCanvasToolSettingsBeta />
-          {doesCanvasNeedScaling ? <IAICanvasResizer /> : <IAICanvas />}
-        </Flex>
-      </Flex>
-    </Box>
-  );
-};
-
-export default UnifiedCanvasContentBeta;

View File

@@ -1,34 +1,58 @@
 import { Box, Flex } from '@chakra-ui/react';
 import { createSelector } from '@reduxjs/toolkit';
 import { useAppDispatch, useAppSelector } from 'app/store/storeHooks';
+import { defaultSelectorOptions } from 'app/store/util/defaultMemoizeOptions';
 import IAICanvas from 'features/canvas/components/IAICanvas';
 import IAICanvasResizer from 'features/canvas/components/IAICanvasResizer';
 import IAICanvasToolbar from 'features/canvas/components/IAICanvasToolbar/IAICanvasToolbar';
 import { canvasSelector } from 'features/canvas/store/canvasSelectors';
 import { requestCanvasRescale } from 'features/canvas/store/thunks/requestCanvasScale';
-import { isEqual } from 'lodash-es';
-import { memo, useLayoutEffect } from 'react';
+import { uiSelector } from 'features/ui/store/uiSelectors';
+import { memo, useCallback, useLayoutEffect } from 'react';
+import UnifiedCanvasToolbarBeta from './UnifiedCanvasBeta/UnifiedCanvasToolbarBeta';
+import UnifiedCanvasToolSettingsBeta from './UnifiedCanvasBeta/UnifiedCanvasToolSettingsBeta';
+import { ImageDTO } from 'services/api';
+import { setInitialCanvasImage } from 'features/canvas/store/canvasSlice';
+import { useDroppable } from '@dnd-kit/core';
+import IAIDropOverlay from 'common/components/IAIDropOverlay';

 const selector = createSelector(
-  [canvasSelector],
-  (canvas) => {
+  [canvasSelector, uiSelector],
+  (canvas, ui) => {
     const { doesCanvasNeedScaling } = canvas;
+    const { shouldUseCanvasBetaLayout } = ui;
     return {
       doesCanvasNeedScaling,
+      shouldUseCanvasBetaLayout,
     };
   },
-  {
-    memoizeOptions: {
-      resultEqualityCheck: isEqual,
-    },
-  }
+  defaultSelectorOptions
 );

 const UnifiedCanvasContent = () => {
   const dispatch = useAppDispatch();
-  const { doesCanvasNeedScaling } = useAppSelector(selector);
+  const { doesCanvasNeedScaling, shouldUseCanvasBetaLayout } =
+    useAppSelector(selector);
+
+  const onDrop = useCallback(
+    (droppedImage: ImageDTO) => {
+      dispatch(setInitialCanvasImage(droppedImage));
+    },
+    [dispatch]
+  );
+
+  const {
+    isOver,
+    setNodeRef: setDroppableRef,
+    active,
+  } = useDroppable({
+    id: 'unifiedCanvas',
+    data: {
+      handleDrop: onDrop,
+    },
+  });

   useLayoutEffect(() => {
     dispatch(requestCanvasRescale());
@@ -42,14 +66,57 @@ const UnifiedCanvasContent = () => {
     return () => window.removeEventListener('resize', resizeCallback);
   }, [dispatch]);

+  if (shouldUseCanvasBetaLayout) {
+    return (
+      <Box
+        ref={setDroppableRef}
+        tabIndex={0}
+        sx={{
+          w: 'full',
+          h: 'full',
+          borderRadius: 'base',
+          bg: 'base.850',
+          p: 4,
+        }}
+      >
+        <Flex
+          sx={{
+            w: 'full',
+            h: 'full',
+            gap: 4,
+          }}
+        >
+          <UnifiedCanvasToolbarBeta />
+          <Flex
+            sx={{
+              flexDir: 'column',
+              w: 'full',
+              h: 'full',
+              gap: 4,
+              position: 'relative',
+            }}
+          >
+            <UnifiedCanvasToolSettingsBeta />
+            <Box sx={{ w: 'full', h: 'full', position: 'relative' }}>
+              {doesCanvasNeedScaling ? <IAICanvasResizer /> : <IAICanvas />}
+              {active && <IAIDropOverlay isOver={isOver} />}
+            </Box>
+          </Flex>
+        </Flex>
+      </Box>
+    );
+  }
+
   return (
     <Box
+      ref={setDroppableRef}
+      tabIndex={-1}
       sx={{
-        width: '100%',
-        height: '100%',
-        padding: 4,
+        w: 'full',
+        h: 'full',
         borderRadius: 'base',
         bg: 'base.850',
+        p: 4,
       }}
     >
       <Flex
@@ -57,8 +124,8 @@ const UnifiedCanvasContent = () => {
           flexDirection: 'column',
           alignItems: 'center',
           gap: 4,
-          width: '100%',
-          height: '100%',
+          w: 'full',
+          h: 'full',
         }}
       >
         <IAICanvasToolbar />
@@ -68,11 +135,14 @@ const UnifiedCanvasContent = () => {
             alignItems: 'center',
             justifyContent: 'center',
             gap: 4,
-            width: '100%',
-            height: '100%',
+            w: 'full',
+            h: 'full',
           }}
         >
-          {doesCanvasNeedScaling ? <IAICanvasResizer /> : <IAICanvas />}
+          <Box sx={{ w: 'full', h: 'full', position: 'relative' }}>
+            {doesCanvasNeedScaling ? <IAICanvasResizer /> : <IAICanvas />}
+            {active && <IAIDropOverlay isOver={isOver} />}
+          </Box>
         </Flex>
       </Flex>
     </Box>
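
The droppable half of this change follows dnd-kit's usual contract: useDroppable registers the node and exposes drag state, and arbitrary `data` rides along so a central onDragEnd handler can dispatch the drop. A condensed sketch under those assumptions; the id and the handleDrop data shape mirror the hunk above but are not a complete protocol.

    import { useDroppable } from '@dnd-kit/core';

    const CanvasDropTarget = (props: { onDrop: (payload: unknown) => void }) => {
      const { isOver, setNodeRef, active } = useDroppable({
        id: 'exampleCanvas',
        // The DndContext's onDragEnd handler can read this back via
        // over.data.current.handleDrop and call it with the dragged image.
        data: { handleDrop: props.onDrop },
      });
      return (
        <div
          ref={setNodeRef}
          style={{ outline: isOver ? '2px dashed grey' : 'none' }}
        >
          {active ? 'Drop the image here' : 'Canvas'}
        </div>
      );
    };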

View File

@@ -1,34 +1,16 @@
 import { Flex } from '@chakra-ui/react';
 import { memo } from 'react';
-import { createSelector } from '@reduxjs/toolkit';
-import { uiSelector } from 'features/ui/store/uiSelectors';
-import { useAppSelector } from 'app/store/storeHooks';
 import UnifiedCanvasContent from './UnifiedCanvasContent';
 import UnifiedCanvasParameters from './UnifiedCanvasParameters';
-import UnifiedCanvasContentBeta from './UnifiedCanvasBeta/UnifiedCanvasContentBeta';
 import ParametersPinnedWrapper from '../../ParametersPinnedWrapper';

-const selector = createSelector(uiSelector, (ui) => {
-  const { shouldUseCanvasBetaLayout } = ui;
-  return {
-    shouldUseCanvasBetaLayout,
-  };
-});
-
 const UnifiedCanvasTab = () => {
-  const { shouldUseCanvasBetaLayout } = useAppSelector(selector);
   return (
     <Flex sx={{ gap: 4, w: 'full', h: 'full' }}>
       <ParametersPinnedWrapper>
         <UnifiedCanvasParameters />
       </ParametersPinnedWrapper>
-      {shouldUseCanvasBetaLayout ? (
-        <UnifiedCanvasContentBeta />
-      ) : (
-        <UnifiedCanvasContent />
-      )}
+      <UnifiedCanvasContent />
     </Flex>
   );
 };

View File

@@ -6,9 +6,8 @@ from omegaconf import OmegaConf
 from pathlib import Path

 os.environ['INVOKEAI_ROOT']='/tmp'
-sys.argv = [] # to prevent config from trying to parse pytest arguments

-from invokeai.app.services.config import InvokeAIAppConfig, InvokeAISettings
+from invokeai.app.services.config import InvokeAIAppConfig
 from invokeai.app.invocations.generate import TextToImageInvocation

@@ -36,48 +35,56 @@ def test_use_init():
     # note that we explicitly set omegaconf dict and argv here
     # so that the values aren't read from ~invokeai/invokeai.yaml and
     # sys.argv respectively.
-    conf1 = InvokeAIAppConfig(init1,[])
+    conf1 = InvokeAIAppConfig.get_config()
     assert conf1
+    conf1.parse_args(conf=init1)
     assert conf1.max_loaded_models==5
     assert not conf1.nsfw_checker

-    conf2 = InvokeAIAppConfig(init2,[])
+    conf2 = InvokeAIAppConfig.get_config()
     assert conf2
+    conf2.parse_args(conf=init2)
     assert conf2.nsfw_checker
     assert conf2.max_loaded_models==2
     assert not hasattr(conf2,'invalid_attribute')

 def test_argv_override():
-    conf = InvokeAIAppConfig(init1,['--nsfw_checker','--max_loaded=10'])
+    conf = InvokeAIAppConfig.get_config()
+    conf.parse_args(conf=init1,argv=['--nsfw_checker','--max_loaded=10'])
     assert conf.nsfw_checker
     assert conf.max_loaded_models==10
     assert conf.outdir==Path('outputs') # this is the default

 def test_env_override():
     # argv overrides
-    conf = InvokeAIAppConfig(conf=init1,argv=['--max_loaded=10'])
+    conf = InvokeAIAppConfig()
+    conf.parse_args(conf=init1,argv=['--max_loaded=10'])
     assert conf.nsfw_checker==False
     os.environ['INVOKEAI_nsfw_checker'] = 'True'
-    conf = InvokeAIAppConfig(conf=init1,argv=['--max_loaded=10'])
+    conf.parse_args(conf=init1,argv=['--max_loaded=10'])
     assert conf.nsfw_checker==True

     # environment variables should be case insensitive
     os.environ['InvokeAI_Max_Loaded_Models'] = '15'
-    conf = InvokeAIAppConfig(conf=init1)
+    conf = InvokeAIAppConfig()
+    conf.parse_args(conf=init1)
     assert conf.max_loaded_models == 15

-    conf = InvokeAIAppConfig(conf=init1,argv=['--no-nsfw_checker','--max_loaded=10'])
+    conf = InvokeAIAppConfig()
+    conf.parse_args(conf=init1,argv=['--no-nsfw_checker','--max_loaded=10'])
     assert conf.nsfw_checker==False
     assert conf.max_loaded_models==10

-    conf = InvokeAIAppConfig(conf=init1,argv=[],max_loaded_models=20)
+    conf = InvokeAIAppConfig.get_config(max_loaded_models=20)
+    conf.parse_args(conf=init1,argv=[])
     assert conf.max_loaded_models==20

 def test_type_coercion():
-    conf = InvokeAIAppConfig(argv=['--root=/tmp/foobar'])
+    conf = InvokeAIAppConfig().get_config()
+    conf.parse_args(argv=['--root=/tmp/foobar'])
     assert conf.root==Path('/tmp/foobar')
     assert isinstance(conf.root,Path)

-    conf = InvokeAIAppConfig(argv=['--root=/tmp/foobar'],root='/tmp/different')
+    conf = InvokeAIAppConfig.get_config(root='/tmp/different')
+    conf.parse_args(argv=['--root=/tmp/foobar'])
     assert conf.root==Path('/tmp/different')
     assert isinstance(conf.root,Path)
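
The precedence these tests pin down is: explicit argv beats environment variables, which beat values from the config file. A compact sketch mirroring the assertions above; the INVOKEAI_ prefix, case-insensitivity, and the --max_loaded flag are taken from the diff, while the exact values are illustrative.

    import os
    from invokeai.app.services.config import InvokeAIAppConfig

    # Environment variables use the INVOKEAI_ prefix and are case-insensitive.
    os.environ['INVOKEAI_max_loaded_models'] = '4'

    conf = InvokeAIAppConfig()
    conf.parse_args(argv=[])                  # no CLI flag: the env value applies
    assert conf.max_loaded_models == 4

    conf.parse_args(argv=['--max_loaded=2'])  # a CLI flag overrides the env var
    assert conf.max_loaded_models == 2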