mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00

Compare commits: 47 Commits, development...v2.2.3
SHA1
b487fa4391
12b98ba4ec
fa25a64d37
29540452f2
c7960f930a
c1c8b5026a
5da42e0ad2
34d6f35408
401165ba35
6d8057c84f
3f23dee6f4
8cdd961ad2
470b267939
bf399e303c
b3d7ad7461
cd66b2c76d
6b406e2b5e
6737cc1443
7fd0eeb9f9
16e3b45fa2
2f07ea03a9
b563d75c58
a7b7b20d16
a47ef3ded9
7cb9b654f3
8819e12a86
967eb60ea9
b1091ecda1
2723dd9051
8f050d992e
0346095876
f9bbc55f74
878a3907e9
4cfb41d9ae
6ec64ecb3c
540315edaa
cf10a1b736
9fb2a43780
1b743f7d9b
d7bf3f7d7b
eba31e7caf
bde456f9fa
9ee83380e6
6982e6a469
0f4d71ed63
8f3f64b22e
dba0280790
@@ -6,23 +6,26 @@ IFS=$'\n\t'

 echo "Be certain that you're in the 'installer' directory before continuing."
 read -p "Press any key to continue, or CTRL-C to exit..."

+VERSION='2.2.3'
+
 # make the installer zip for linux and mac
 rm -rf InvokeAI
 mkdir -p InvokeAI
-cp install.sh InvokeAI
+cp install.sh.in InvokeAI/install.sh
+chmod a+x InvokeAI/install.sh
 cp readme.txt InvokeAI

-zip -r InvokeAI-linux.zip InvokeAI
-zip -r InvokeAI-mac.zip InvokeAI
+zip -r InvokeAI-binary-$VERSION-linux.zip InvokeAI
+zip -r InvokeAI-binary-$VERSION-mac.zip InvokeAI

 # make the installer zip for windows
 rm -rf InvokeAI
 mkdir -p InvokeAI
-cp install.bat InvokeAI
+cp install.bat.in InvokeAI/install.bat
 cp readme.txt InvokeAI
 cp WinLongPathsEnabled.reg InvokeAI

-zip -r InvokeAI-windows.zip InvokeAI
+zip -r InvokeAI-binary-$VERSION-windows.zip InvokeAI

 rm -rf InvokeAI
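For a quick sanity check of what this hunk now produces, one could list an artifact's contents after running the script. This is illustrative only; it assumes the zips were just built in the working directory:

```bash
# expect InvokeAI/install.sh and InvokeAI/readme.txt inside
unzip -l InvokeAI-binary-2.2.3-linux.zip
```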
@@ -10,6 +10,9 @@

 @rem This enables a user to install this project without manually installing git or Python

+@rem change to the script's directory
+PUSHD "%~dp0"
+
 set "no_cache_dir=--no-cache-dir"
 if "%1" == "use-cache" (
     set "no_cache_dir="

@@ -22,9 +25,7 @@ set INSTALL_ENV_DIR=%cd%\installer_files\env

 @rem https://mamba.readthedocs.io/en/latest/installation.html
 set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
 set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
-#set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
-# RELEASE_SOURCEBALL=/archive/refs/heads/test-installer.tar.gz
-RELEASE_SOURCEBALL=/archive/refs/heads/development.tar.gz
+set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
 set PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
 set PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-x86_64-pc-windows-msvc-shared-install_only.tar.gz

@@ -127,7 +128,7 @@ if %errorlevel% neq 0 goto err_exit

 echo ***** Updated pip and wheel *****

 set err_msg=----- requirements file copy failed -----
-copy installer\py3.10-windows-x86_64-cuda-reqs.txt requirements.txt
+copy binary_installer\py3.10-windows-x86_64-cuda-reqs.txt requirements.txt
 if %errorlevel% neq 0 goto err_exit

 set err_msg=----- main pip install failed -----

@@ -140,11 +141,11 @@ set err_msg=----- InvokeAI setup failed -----

 .venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -e .
 if %errorlevel% neq 0 goto err_exit

-copy installer\invoke.bat .\invoke.bat
+copy binary_installer\invoke.bat.in .\invoke.bat
 echo ***** Installed invoke launcher script ******

 @rem more cleanup
-rd /s /q installer installer_files
+rd /s /q binary_installer installer_files

 @rem preload the models
 call .venv\Scripts\python scripts\configure_invokeai.py
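Note the `%1 == use-cache` check near the top of this file: the batch installer accepts one optional positional argument that switches off `--no-cache-dir`. An illustrative invocation (run from a Windows command prompt):

```bash
# default: pip runs with --no-cache-dir
install.bat

# the optional flag from the diff above: reuse pip's local cache
install.bat use-cache
```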
@@ -1,5 +1,9 @@

 #!/usr/bin/env bash
+
+# ensure we're in the correct folder in case user's CWD is somewhere else
+scriptdir=$(dirname "$0")
+cd "$scriptdir"

 set -euo pipefail
 IFS=$'\n\t'

@@ -22,6 +26,8 @@ function _err_exit {

 # This enables a user to install this project without manually installing git or Python

+echo -e "\n***** Installing InvokeAI into $(pwd)... *****\n"
+
 export no_cache_dir="--no-cache-dir"
 if [ $# -ge 1 ]; then
     if [ "$1" = "use-cache" ]; then

@@ -29,10 +35,6 @@ if [ $# -ge 1 ]; then
     fi
 fi

-echo "$no_cache_dir"
-
-echo -e "\n***** Installing InvokeAI... *****\n"
-
 OS_NAME=$(uname -s)
 case "${OS_NAME}" in

@@ -80,19 +82,17 @@ if [ "$OS_NAME" == "darwin" ] && [ "$OS_ARCH" == "arm64" ]; then
 fi

 # config
-echo "USING development BRANCH. REMEMBER TO CHANGE TO main BEFORE RELEASE"
 INSTALL_ENV_DIR="$(pwd)/installer_files/env"
 MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${MAMBA_OS_NAME}-${MAMBA_ARCH}/latest"
 RELEASE_URL=https://github.com/invoke-ai/InvokeAI
-# RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
 # RELEASE_SOURCEBALL=/archive/refs/heads/test-installer.tar.gz
-RELEASE_SOURCEBALL=/archive/refs/heads/development.tar.gz
+RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
 PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
 if [ "$OS_NAME" == "darwin" ]; then
     PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-apple-darwin-install_only.tar.gz
 elif [ "$OS_NAME" == "linux" ]; then
     PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-unknown-linux-gnu-install_only.tar.gz
 fi
 echo "INSTALLING $RELEASE_SOURCEBALL FROM $RELEASE_URL"

 PACKAGES_TO_INSTALL=""
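For orientation, `RELEASE_URL` and `RELEASE_SOURCEBALL` concatenate into a single download URL. A minimal sketch of the fetch this implies (illustrative; not the script's actual download code):

```bash
# resolves to https://github.com/invoke-ai/InvokeAI/archive/refs/heads/main.tar.gz
curl -L "${RELEASE_URL}${RELEASE_SOURCEBALL}" | tar -xz
```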
@@ -192,32 +192,33 @@ echo -e "We're running under"

 _err_exit $? _err_msg

 _err_msg="\n----- pip update failed -----\n"
-.venv/bin/python3 -m pip install "$no_cache_dir" --no-warn-script-location --upgrade pip wheel
+.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location --upgrade pip
 _err_exit $? _err_msg

-echo -e "\n***** Updated pip and wheel *****\n"
+echo -e "\n***** Updated pip *****\n"

 _err_msg="\n----- requirements file copy failed -----\n"
-cp installer/py3.10-${OS_NAME}-"${OS_ARCH}"-${CD}-reqs.txt requirements.txt
+cp binary_installer/py3.10-${OS_NAME}-"${OS_ARCH}"-${CD}-reqs.txt requirements.txt
 _err_exit $? _err_msg

 _err_msg="\n----- main pip install failed -----\n"
-.venv/bin/python3 -m pip install "$no_cache_dir" --no-warn-script-location -r requirements.txt
+.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -r requirements.txt
 _err_exit $? _err_msg

 echo -e "\n***** Installed Python dependencies *****\n"

 _err_msg="\n----- InvokeAI setup failed -----\n"
-.venv/bin/python3 -m pip install "$no_cache_dir" --no-warn-script-location -e .
+.venv/bin/python3 -m pip install $no_cache_dir --no-warn-script-location -e .
 _err_exit $? _err_msg

 echo -e "\n***** Installed InvokeAI *****\n"

-cp installer/invoke.sh .
+cp binary_installer/invoke.sh.in ./invoke.sh
+chmod a+rx ./invoke.sh
 echo -e "\n***** Installed invoke launcher script ******\n"

 # more cleanup
-rm -rf installer/ installer_files/
+rm -rf binary_installer/ installer_files/

 # preload the models
 .venv/bin/python3 scripts/configure_invokeai.py

@@ -227,6 +228,8 @@ deactivate

 echo -e "\n***** Finished downloading models *****\n"

-echo "All done! Run the command './invoke.sh' to start InvokeAI."
+echo "All done! Run the command"
+echo "   $scriptdir/invoke.sh"
+echo "to start InvokeAI."
 read -p "Press any key to exit..."
 exit
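The repeated `_err_exit $? _err_msg` calls in this file rely on a helper defined near the top of the script (its signature is visible in the `@@ -22,6 +26,8 @@ function _err_exit {` hunk header above, but its body is outside the hunks shown). A minimal sketch consistent with that calling convention — the name and call sites come from the diff, the body here is assumed:

```bash
function _err_exit {
    # $1: exit status of the previous command
    # $2: name of a variable holding the error message (expanded indirectly)
    if [ "$1" -ne 0 ]; then
        echo -e "${!2}"
        read -p "Press any key to exit..."
        exit 1
    fi
}
```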
5	installer/invoke.sh → binary_installer/invoke.sh.in	Executable file → Normal file
@@ -4,6 +4,11 @@ set -eu

 . .venv/bin/activate

+# set required env var for torch on mac MPS
+if [ "$(uname -s)" == "Darwin" ]; then
+    export PYTORCH_ENABLE_MPS_FALLBACK=1
+fi
+
 echo "Do you want to generate images using the"
 echo "1. command-line"
 echo "2. browser-based UI"
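`PYTORCH_ENABLE_MPS_FALLBACK=1` tells torch to fall back to CPU for operators not yet implemented on Apple's MPS backend. A quick way to confirm the backend is visible at all (assumes the venv's torch is ≥ 1.12, as pinned in the lockfiles below):

```bash
.venv/bin/python3 -c "import torch; print(torch.backends.mps.is_available())"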
@@ -2,9 +2,10 @@
 # This file is autogenerated by pip-compile with python 3.10
 # To update, run:
 #
-#    pip-compile --allow-unsafe --generate-hashes --output-file=installer/py3.10-darwin-x86_64-cpu-reqs.txt installer/requirements.in
+#    pip-compile --allow-unsafe --generate-hashes --output-file=installer/py3.10-darwin-arm64-mps-reqs.txt installer/requirements.in
 #
 --extra-index-url https://download.pytorch.org/whl/torch_stable.html
+--extra-index-url https://download.pytorch.org/whl/cu116
 --trusted-host https

@@ -150,6 +151,10 @@ blinker==1.5 \
     --hash=sha256:1eb563df6fdbc39eeddc177d953203f99f097e9bf0e2b8f9f3cf18b6ca425e36 \
     --hash=sha256:923e5e2f69c155f2cc42dafbbd70e16e3fde24d2d4aa2ab72fbe386238892462
     # via streamlit
+boltons==21.0.0 \
+    --hash=sha256:65e70a79a731a7fe6e98592ecfb5ccf2115873d01dbc576079874629e5c90f13 \
+    --hash=sha256:b9bb7b58b2b420bbe11a6025fdef6d3e5edc9f76a42fb467afe7ca212ef9948b
+    # via torchsde
 cachetools==5.2.0 \
     --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \
     --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db

@@ -608,8 +613,8 @@ jsonschema==4.17.0 \
     # via
     #   altair
     #   jsonmerge
-k-diffusion @ https://github.com/invoke-ai/k-diffusion/archive/7f16b2c33411f26b3eae78d10648d625cb0c1095.zip \
-    --hash=sha256:c3f2c84036aa98c3abf4552fafab04df5ca472aa639982795e05bb1db43ce5e4
+k-diffusion @ https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip \
+    --hash=sha256:8eac5cdc08736e6d61908a1b2948f2b2f62691b01dc1aab978bddb3451af0d66
     # via -r installer/requirements.in
 kiwisolver==1.4.4 \
     --hash=sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b \

@@ -1003,6 +1008,7 @@ numpy==1.23.4 \
     #   tifffile
     #   torch-fidelity
     #   torchmetrics
+    #   torchsde
     #   torchvision
     #   transformers
 oauthlib==3.2.2 \

@@ -1654,6 +1660,7 @@ scipy==1.9.3 \
     #   scikit-learn
     #   torch-fidelity
     #   torchdiffeq
+    #   torchsde
 semver==2.13.0 \
     --hash=sha256:ced8b23dceb22134307c1b8abfa523da14198793d9787ac838e70e29e77458d4 \
     --hash=sha256:fa0fe2722ee1c3f57eac478820c3a5ae2f624af8264cbdf9000c980ff7f75e3f

@@ -1869,6 +1876,7 @@ torch==1.12.1 ; platform_system == "Darwin" \
     #   torch-fidelity
     #   torchdiffeq
     #   torchmetrics
+    #   torchsde
     #   torchvision
 torch-fidelity==0.3.0 \
     --hash=sha256:3d3e33db98919759cc4f3f24cb27e1e74bdc7c905d90a780630e4e1c18492b66 \

@@ -1882,6 +1890,10 @@ torchmetrics==0.10.2 \
     --hash=sha256:43757d82266969906fc74b6e80766fcb2a0d52d6c3d09e3b7c98cf3b733fd20c \
     --hash=sha256:daa29d96bff5cff04d80eec5b9f5076993d6ac9c2d2163e88b6b31f8d38f7c25
     # via pytorch-lightning
+torchsde==0.2.5 \
+    --hash=sha256:222be9e15610d37a4b5a71cfa0c442178f9fd9ca02f6522a3e11c370b3d0906b \
+    --hash=sha256:4c34373a94a357bdf60bbfee00c850f3563d634491555820b900c9a4f7eff300
+    # via k-diffusion
 torchvision==0.13.1 ; platform_system == "Darwin" \
     --hash=sha256:0298bae3b09ac361866088434008d82b99d6458fe8888c8df90720ef4b347d44 \
     --hash=sha256:08f592ea61836ebeceb5c97f4d7a813b9d7dc651bbf7ce4401563ccfae6a21fc \

@@ -1942,6 +1954,9 @@ tqdm==4.64.1 \
     #   taming-transformers-rom1504
     #   torch-fidelity
     #   transformers
+trampoline==0.1.2 \
+    --hash=sha256:36cc9a4ff9811843d177fc0e0740efbd7da39eadfe6e50c9e2937cbc06d899d9
+    # via torchsde
 transformers==4.24.0 \
     --hash=sha256:486f353a8e594002e48be0e2aba723d96eda839e63bfe274702a4b5eda85559b \
     --hash=sha256:b7ab50039ef9bf817eff14ab974f306fd20a72350bdc9df3a858fd009419322e
(File diff suppressed because it is too large)
@@ -1,9 +1,10 @@
 #
-# This file is autogenerated by pip-compile with python 3.9
-# To update, run:
+# This file is autogenerated by pip-compile with Python 3.9
+# by the following command:
 #
-#    pip-compile --allow-unsafe --generate-hashes --output-file=installer/py3.10-linux-x86_64-cuda-reqs.txt installer/requirements.in
+#    pip-compile --allow-unsafe --generate-hashes --output-file=binary_installer/py3.10-linux-x86_64-cuda-reqs.txt binary_installer/requirements.in
 #
 --extra-index-url https://download.pytorch.org/whl/torch_stable.html
+--extra-index-url https://download.pytorch.org/whl/cu116
 --trusted-host https

@@ -17,7 +18,7 @@ accelerate==0.14.0 \
     --hash=sha256:31c5bcc40564ef849b5bc1c4424a43ccaf9e26413b7df89c2e36bf81f070fd44 \
     --hash=sha256:b15d562c0889d0cf441b01faa025dfc29b163d061b6cc7d489c2c83b0a55ffab
     # via
-    #   -r installer/requirements.in
+    #   -r binary_installer/requirements.in
     #   k-diffusion
 addict==2.4.0 \
     --hash=sha256:249bb56bbfd3cdc2a004ea0ff4c2b6ddc84d53bc2194761636eb314d5cfa5dfc \

@@ -119,7 +120,7 @@ aiosignal==1.2.0 \
 albumentations==1.3.0 \
     --hash=sha256:294165d87d03bc8323e484927f0a5c1a3c64b0e7b9c32a979582a6c93c363bdf \
     --hash=sha256:be1af36832c8893314f2a5550e8ac19801e04770734c1b70fa3c996b41f37bed
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 altair==4.2.0 \
     --hash=sha256:0c724848ae53410c13fa28be2b3b9a9dcb7b5caa1a70f7f217bd663bb419935a \
     --hash=sha256:d87d9372e63b48cd96b2a6415f0cf9457f50162ab79dc7a31cd7e024dd840026

@@ -150,6 +151,10 @@ blinker==1.5 \
     --hash=sha256:1eb563df6fdbc39eeddc177d953203f99f097e9bf0e2b8f9f3cf18b6ca425e36 \
     --hash=sha256:923e5e2f69c155f2cc42dafbbd70e16e3fde24d2d4aa2ab72fbe386238892462
     # via streamlit
+boltons==21.0.0 \
+    --hash=sha256:65e70a79a731a7fe6e98592ecfb5ccf2115873d01dbc576079874629e5c90f13 \
+    --hash=sha256:b9bb7b58b2b420bbe11a6025fdef6d3e5edc9f76a42fb467afe7ca212ef9948b
+    # via torchsde
 cachetools==5.2.0 \
     --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \
     --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db

@@ -183,11 +188,11 @@ click==8.1.3 \
 clip @ https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip \
     --hash=sha256:b5842c25da441d6c581b53a5c60e0c2127ebafe0f746f8e15561a006c6c3be6a
     # via
-    #   -r installer/requirements.in
+    #   -r binary_installer/requirements.in
     #   clipseg
 clipseg @ https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip \
     --hash=sha256:14f43ed42f90be3fe57f06de483cb8be0f67f87a6f62a011339d45a39f4b4189
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 commonmark==0.9.1 \
     --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \
     --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9

@@ -274,7 +279,7 @@ decorator==5.1.1 \
 diffusers==0.7.2 \
     --hash=sha256:4a5f8b3a5fbd936bba7d459611cb35ec62875030367be32b232f9e19543e25a9 \
     --hash=sha256:fb814ffd150cc6f470380b8c6a521181a77beb2f44134d2aad2e4cd8aa2ced0e
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 dnspython==2.2.1 \
     --hash=sha256:0f7569a4a6ff151958b64304071d370daa3243d15941a7beedf0c9fe5105603e \
     --hash=sha256:a851e51367fb93e9e1361732c1d60dab63eff98712e503ea7d92e6eccb109b4f

@@ -294,7 +299,7 @@ entrypoints==0.4 \
 eventlet==0.33.1 \
     --hash=sha256:a085922698e5029f820cf311a648ac324d73cec0e4792877609d978a4b5bbf31 \
     --hash=sha256:afbe17f06a58491e9aebd7a4a03e70b0b63fd4cf76d8307bae07f280479b1515
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 facexlib==0.2.5 \
     --hash=sha256:31e20cc4ed5d63562d380e4564bae14ac0d5d1899a079bad87621e13564567e4 \
     --hash=sha256:cc7ceb56c5424319c47223cf75eef6828c34c66082707c6eb35b95d39779f02d

@@ -320,15 +325,15 @@ flask==2.2.2 \
 flask-cors==3.0.10 \
     --hash=sha256:74efc975af1194fc7891ff5cd85b0f7478be4f7f59fe158102e91abb72bb4438 \
     --hash=sha256:b60839393f3b84a0f3746f6cdca56c1ad7426aa738b70d6c61375857823181de
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 flask-socketio==5.3.1 \
     --hash=sha256:fd0ed0fc1341671d92d5f5b2f5503916deb7aa7e2940e6636cfa2c087c828bf9 \
     --hash=sha256:ff0c721f20bff1e2cfba77948727a8db48f187e89a72fe50c34478ce6efb3353
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 flaskwebgui==0.3.7 \
     --hash=sha256:4a69955308eaa8bb256ba04a994dc8f58a48dcd6f9599694ab1bcd9f43d88a5d \
     --hash=sha256:535974ce2672dcc74787c254de24cceed4101be75d96952dae82014dd57f061e
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 fonttools==4.38.0 \
     --hash=sha256:2bb244009f9bf3fa100fc3ead6aeb99febe5985fa20afbfbaa2f8946c2fbdaf1 \
     --hash=sha256:820466f43c8be8c3009aef8b87e785014133508f0de64ec469e4efb643ae54fb

@@ -412,11 +417,11 @@ future==0.18.2 \
 getpass-asterisk==1.0.1 \
     --hash=sha256:20d45cafda0066d761961e0919728526baf7bb5151fbf48a7d5ea4034127d857 \
     --hash=sha256:7cc357a924cf62fa4e15b73cb4e5e30685c9084e464ffdc3fd9000a2b54ea9e9
-    # via -r installer/requirements.in
-gfpgan @ https://github.com/TencentARC/GFPGAN/archive/2eac2033893ca7f427f4035d80fe95b92649ac56.zip \
-    --hash=sha256:79e6d71c8f1df7c7ccb0ac6b9a2ccb615ad5cde818c8b6f285a8711c05aebf85
+    # via -r binary_installer/requirements.in
+gfpgan @ https://github.com/invoke-ai/GFPGAN/archive/c796277a1cf77954e5fc0b288d7062d162894248.zip ; platform_system == "Linux" or platform_system == "Darwin" \
+    --hash=sha256:4155907b8b7db3686324554df7007eedd245cdf8656c21da9d9a3f44bef2fcaa
     # via
-    #   -r installer/requirements.in
+    #   -r binary_installer/requirements.in
     #   realesrgan
 gitdb==4.0.9 \
     --hash=sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd \

@@ -577,7 +582,7 @@ imageio-ffmpeg==0.4.7 \
     --hash=sha256:7a08838f97f363e37ca41821b864fd3fdc99ab1fe2421040c78eb5f56a9e723e \
     --hash=sha256:8e724d12dfe83e2a6eb39619e820243ca96c81c47c2648e66e05f7ee24e14312 \
     --hash=sha256:fc60686ef03c2d0f842901b206223c30051a6a120384458761390104470846fd
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 importlib-metadata==5.0.0 \
     --hash=sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab \
     --hash=sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43

@@ -610,9 +615,9 @@ jsonschema==4.17.0 \
     # via
     #   altair
     #   jsonmerge
-k-diffusion @ https://github.com/invoke-ai/k-diffusion/archive/7f16b2c33411f26b3eae78d10648d625cb0c1095.zip \
-    --hash=sha256:c3f2c84036aa98c3abf4552fafab04df5ca472aa639982795e05bb1db43ce5e4
-    # via -r installer/requirements.in
+k-diffusion @ https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip \
+    --hash=sha256:8eac5cdc08736e6d61908a1b2948f2b2f62691b01dc1aab978bddb3451af0d66
+    # via -r binary_installer/requirements.in
 kiwisolver==1.4.4 \
     --hash=sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b \
     --hash=sha256:03baab2d6b4a54ddbb43bba1a3a2d1627e82d205c5cf8f4c924dc49284b87166 \

@@ -1005,6 +1010,7 @@ numpy==1.23.4 \
     #   tifffile
     #   torch-fidelity
     #   torchmetrics
+    #   torchsde
     #   torchvision
     #   transformers
 oauthlib==3.2.2 \

@@ -1091,7 +1097,7 @@ pathtools==0.1.2 \
 picklescan==0.0.5 \
     --hash=sha256:368cf1b9a075bc1b6460ad82b694f260532b836c82f99d13846cd36e1bbe7f9a \
     --hash=sha256:57153eca04d5df5009f2cdd595aef261b8a6f27e03046a1c84f672aa6869c592
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 pillow==9.3.0 \
     --hash=sha256:03150abd92771742d4a8cd6f2fa6246d847dcd2e332a18d0c15cc75bf6703040 \
     --hash=sha256:073adb2ae23431d3b9bcbcff3fe698b62ed47211d0716b067385538a1b0f28b8 \

@@ -1300,11 +1306,11 @@ pyparsing==3.0.9 \
     #   packaging
 pypatchmatch @ https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip \
     --hash=sha256:4ad6ec95379e7d122d494ff76633cc7cf9b71330d5efda147fceba81e3dc6cd2
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 pyreadline3==3.4.1 \
     --hash=sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae \
     --hash=sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 pyrsistent==0.19.2 \
     --hash=sha256:055ab45d5911d7cae397dc418808d8802fb95262751872c841c170b0dbf51eed \
     --hash=sha256:111156137b2e71f3a9936baf27cb322e8024dac3dc54ec7fb9f0bcf3249e68bb \

@@ -1441,7 +1447,7 @@ qudida==0.0.4 \
 realesrgan==0.3.0 \
     --hash=sha256:0d36da96ab9f447071606e91f502ccdfb08f80cc82ee4f8caf720c7745ccec7e \
     --hash=sha256:59336c16c30dd5130eff350dd27424acb9b7281d18a6810130e265606c9a6088
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 regex==2022.10.31 \
     --hash=sha256:052b670fafbe30966bbe5d025e90b2a491f85dfe5b2583a163b5e60a85a321ad \
     --hash=sha256:0653d012b3bf45f194e5e6a41df9258811ac8fc395579fa82958a8b76286bea4 \

@@ -1656,6 +1662,7 @@ scipy==1.9.3 \
     #   scikit-learn
     #   torch-fidelity
     #   torchdiffeq
+    #   torchsde
 semver==2.13.0 \
     --hash=sha256:ced8b23dceb22134307c1b8abfa523da14198793d9787ac838e70e29e77458d4 \
     --hash=sha256:fa0fe2722ee1c3f57eac478820c3a5ae2f624af8264cbdf9000c980ff7f75e3f

@@ -1663,7 +1670,7 @@ semver==2.13.0 \
 send2trash==1.8.0 \
     --hash=sha256:d2c24762fd3759860a0aff155e45871447ea58d2be6bdd39b5c8f966a0c99c2d \
     --hash=sha256:f20eaadfdb517eaca5ce077640cb261c7d2698385a6a0f072a4a5447fd49fa08
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 sentry-sdk==1.10.1 \
     --hash=sha256:06c0fa9ccfdc80d7e3b5d2021978d6eb9351fa49db9b5847cf4d1f2a473414ad \
     --hash=sha256:105faf7bd7b7fa25653404619ee261527266b14103fe1389e0ce077bd23a9691

@@ -1754,11 +1761,11 @@ smmap==5.0.0 \
 streamlit==1.14.0 \
     --hash=sha256:62556d873567e1b3427bcd118a57ee6946619f363bd6bba38df2d1f8225ecba0 \
     --hash=sha256:e078b8143d150ba721bdb9194218e311c5fe1d6d4156473a2dea6cc848a6c9fc
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 taming-transformers-rom1504==0.0.6 \
     --hash=sha256:051b5804c58caa247bcd51d17ddb525b4d5f892a29d42dc460f40e3e9e34e5d8 \
     --hash=sha256:73fe5fc1108accee4236ee6976e0987ab236afad0af06cb9f037641a908d2c32
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 tb-nightly==2.11.0a20221106 \
     --hash=sha256:8940457ee42db92f01da8bcdbbea1a476735eda559dde5976f5728919960af4a
     # via

@@ -1783,7 +1790,7 @@ tensorboard-plugin-wit==1.8.1 \
     #   tensorboard
 test-tube==0.7.5 \
     --hash=sha256:1379c33eb8cde3e9b36610f87da0f16c2e06496b1cfebac473df4e7be2faa124
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 threadpoolctl==3.1.0 \
     --hash=sha256:8b99adda265feb6773280df41eece7b2e6561b772d21ffd52e372f999024907b \
     --hash=sha256:a335baacfaa4400ae1f0d8e3a58d6674d2f8828e3716bb2802c44955ad391380

@@ -1843,7 +1850,7 @@ torch==1.12.0+cu116 ; platform_system == "Linux" or platform_system == "Windows"
     --hash=sha256:aa43d7b54b86f723f17c5c44df1078c59a6149fc4d42fbef08aafab9d61451c9 \
     --hash=sha256:f772be831447dd01ebd26cbedf619e668d1b269d69bf6b4ff46b1378362bff26
     # via
-    #   -r installer/requirements.in
+    #   -r binary_installer/requirements.in
     #   accelerate
     #   basicsr
     #   clean-fid

@@ -1859,11 +1866,12 @@ torch==1.12.0+cu116 ; platform_system == "Linux" or platform_system == "Windows"
     #   torch-fidelity
     #   torchdiffeq
     #   torchmetrics
+    #   torchsde
     #   torchvision
 torch-fidelity==0.3.0 \
     --hash=sha256:3d3e33db98919759cc4f3f24cb27e1e74bdc7c905d90a780630e4e1c18492b66 \
     --hash=sha256:d01284825595feb7dc3eae3dc9a0d8ced02be764813a3483f109bc142b52a1d3
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 torchdiffeq==0.2.3 \
     --hash=sha256:b5b01ec1294a2d8d5f77e567bf17c5de1237c0573cb94deefa88326f0e18c338 \
     --hash=sha256:fe75f434b9090ac0c27702e02bed21472b0f87035be6581f51edc5d4013ea31a

@@ -1872,6 +1880,10 @@ torchmetrics==0.10.2 \
     --hash=sha256:43757d82266969906fc74b6e80766fcb2a0d52d6c3d09e3b7c98cf3b733fd20c \
     --hash=sha256:daa29d96bff5cff04d80eec5b9f5076993d6ac9c2d2163e88b6b31f8d38f7c25
     # via pytorch-lightning
+torchsde==0.2.5 \
+    --hash=sha256:222be9e15610d37a4b5a71cfa0c442178f9fd9ca02f6522a3e11c370b3d0906b \
+    --hash=sha256:4c34373a94a357bdf60bbfee00c850f3563d634491555820b900c9a4f7eff300
+    # via k-diffusion
 torchvision==0.13.0+cu116 ; platform_system == "Linux" or platform_system == "Windows" \
     --hash=sha256:1696feadf1921c8fa1549bad774221293298288ebedaa14e44bc3e57e964a369 \
     --hash=sha256:572544b108eaf12638f3dca0f496a453c4b8d8256bcc8333d5355df641c0380c \

@@ -1882,7 +1894,7 @@ torchvision==0.13.0+cu116 ; platform_system == "Linux" or platform_system == "Wi
     --hash=sha256:cb6bf0117b8f4b601baeae54e8a6bb5c4942b054835ba997f438ddcb7adcfb90 \
     --hash=sha256:d1a3c124645e3460b3e50b54eb89a2575a5036bfa618f15dc4f5d635c716069d
     # via
-    #   -r installer/requirements.in
+    #   -r binary_installer/requirements.in
     #   basicsr
     #   clean-fid
     #   clip

@@ -1921,10 +1933,13 @@ tqdm==4.64.1 \
     #   taming-transformers-rom1504
     #   torch-fidelity
     #   transformers
+trampoline==0.1.2 \
+    --hash=sha256:36cc9a4ff9811843d177fc0e0740efbd7da39eadfe6e50c9e2937cbc06d899d9
+    # via torchsde
 transformers==4.24.0 \
     --hash=sha256:486f353a8e594002e48be0e2aba723d96eda839e63bfe274702a4b5eda85559b \
     --hash=sha256:b7ab50039ef9bf817eff14ab974f306fd20a72350bdc9df3a858fd009419322e
-    # via -r installer/requirements.in
+    # via -r binary_installer/requirements.in
 typing-extensions==4.4.0 \
     --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \
     --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e
@@ -4,6 +4,7 @@
 #
 #    pip-compile --allow-unsafe --generate-hashes --output-file=installer/py3.10-windows-x86_64-cuda-reqs.txt installer/requirements.in
 #
 --extra-index-url https://download.pytorch.org/whl/torch_stable.html
+--extra-index-url https://download.pytorch.org/whl/cu116
 --trusted-host https

@@ -150,6 +151,10 @@ blinker==1.5 \
     --hash=sha256:1eb563df6fdbc39eeddc177d953203f99f097e9bf0e2b8f9f3cf18b6ca425e36 \
     --hash=sha256:923e5e2f69c155f2cc42dafbbd70e16e3fde24d2d4aa2ab72fbe386238892462
     # via streamlit
+boltons==21.0.0 \
+    --hash=sha256:65e70a79a731a7fe6e98592ecfb5ccf2115873d01dbc576079874629e5c90f13 \
+    --hash=sha256:b9bb7b58b2b420bbe11a6025fdef6d3e5edc9f76a42fb467afe7ca212ef9948b
+    # via torchsde
 cachetools==5.2.0 \
     --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \
     --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db

@@ -614,8 +619,8 @@ jsonschema==4.17.0 \
     # via
     #   altair
     #   jsonmerge
-k-diffusion @ https://github.com/invoke-ai/k-diffusion/archive/7f16b2c33411f26b3eae78d10648d625cb0c1095.zip \
-    --hash=sha256:c3f2c84036aa98c3abf4552fafab04df5ca472aa639982795e05bb1db43ce5e4
+k-diffusion @ https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip \
+    --hash=sha256:8eac5cdc08736e6d61908a1b2948f2b2f62691b01dc1aab978bddb3451af0d66
     # via -r installer/requirements.in
 kiwisolver==1.4.4 \
     --hash=sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b \

@@ -1009,6 +1014,7 @@ numpy==1.23.4 \
     #   tifffile
     #   torch-fidelity
     #   torchmetrics
+    #   torchsde
     #   torchvision
     #   transformers
 oauthlib==3.2.2 \

@@ -1660,6 +1666,7 @@ scipy==1.9.3 \
     #   scikit-learn
     #   torch-fidelity
     #   torchdiffeq
+    #   torchsde
 semver==2.13.0 \
     --hash=sha256:ced8b23dceb22134307c1b8abfa523da14198793d9787ac838e70e29e77458d4 \
     --hash=sha256:fa0fe2722ee1c3f57eac478820c3a5ae2f624af8264cbdf9000c980ff7f75e3f

@@ -1863,6 +1870,7 @@ torch==1.12.0+cu116 ; platform_system == "Linux" or platform_system == "Windows"
     #   torch-fidelity
     #   torchdiffeq
     #   torchmetrics
+    #   torchsde
     #   torchvision
 torch-fidelity==0.3.0 \
     --hash=sha256:3d3e33db98919759cc4f3f24cb27e1e74bdc7c905d90a780630e4e1c18492b66 \

@@ -1876,6 +1884,10 @@ torchmetrics==0.10.2 \
     --hash=sha256:43757d82266969906fc74b6e80766fcb2a0d52d6c3d09e3b7c98cf3b733fd20c \
     --hash=sha256:daa29d96bff5cff04d80eec5b9f5076993d6ac9c2d2163e88b6b31f8d38f7c25
     # via pytorch-lightning
+torchsde==0.2.5 \
+    --hash=sha256:222be9e15610d37a4b5a71cfa0c442178f9fd9ca02f6522a3e11c370b3d0906b \
+    --hash=sha256:4c34373a94a357bdf60bbfee00c850f3563d634491555820b900c9a4f7eff300
+    # via k-diffusion
 torchvision==0.13.0+cu116 ; platform_system == "Linux" or platform_system == "Windows" \
     --hash=sha256:1696feadf1921c8fa1549bad774221293298288ebedaa14e44bc3e57e964a369 \
     --hash=sha256:572544b108eaf12638f3dca0f496a453c4b8d8256bcc8333d5355df641c0380c \

@@ -1925,6 +1937,9 @@ tqdm==4.64.1 \
     #   taming-transformers-rom1504
     #   torch-fidelity
     #   transformers
+trampoline==0.1.2 \
+    --hash=sha256:36cc9a4ff9811843d177fc0e0740efbd7da39eadfe6e50c9e2937cbc06d899d9
+    # via torchsde
 transformers==4.24.0 \
     --hash=sha256:486f353a8e594002e48be0e2aba723d96eda839e63bfe274702a4b5eda85559b \
     --hash=sha256:b7ab50039ef9bf817eff14ab974f306fd20a72350bdc9df3a858fd009419322e
@@ -1,5 +1,6 @@
 --prefer-binary
 --extra-index-url https://download.pytorch.org/whl/torch_stable.html
+--extra-index-url https://download.pytorch.org/whl/cu116
 --trusted-host https://download.pytorch.org
 accelerate~=0.14
 albumentations

@@ -25,6 +26,7 @@ transformers
 picklescan
 https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip
 https://github.com/invoke-ai/clipseg/archive/1f754751c85d7d4255fa681f4491ff5711c1c288.zip
-https://github.com/TencentARC/GFPGAN/archive/2eac2033893ca7f427f4035d80fe95b92649ac56.zip
-https://github.com/invoke-ai/k-diffusion/archive/7f16b2c33411f26b3eae78d10648d625cb0c1095.zip
+https://github.com/invoke-ai/GFPGAN/archive/3f5d2397361199bc4a91c08bb7d80f04d7805615.zip ; platform_system=='Windows'
+https://github.com/invoke-ai/GFPGAN/archive/c796277a1cf77954e5fc0b288d7062d162894248.zip ; platform_system=='Linux' or platform_system=='Darwin'
+https://github.com/Birch-san/k-diffusion/archive/363386981fee88620709cf8f6f2eea167bd6cd74.zip
 https://github.com/invoke-ai/PyPatchMatch/archive/129863937a8ab37f6bbcec327c994c0f932abdbc.zip
@@ -171,12 +171,12 @@ title: Changelog

 - Integrate sd-v1-5 model into test matrix (easily expandable), remove
   unecesarry caches by @mauwii in
   https://github.com/invoke-ai/InvokeAI/pull/1293
-- add --no-interactive to preload_models step by @mauwii in
+- add --no-interactive to configure_invokeai step by @mauwii in
   https://github.com/invoke-ai/InvokeAI/pull/1302
 - 1-click installer and updater. Uses micromamba to install git and conda into a
   contained environment (if necessary) before running the normal installation
   script by @cmdr2 in https://github.com/invoke-ai/InvokeAI/pull/1253
-- preload_models.py script downloads the weight files by @lstein in
+- configure_invokeai.py script downloads the weight files by @lstein in
   https://github.com/invoke-ai/InvokeAI/pull/1290

 ## v2.0.1 <small>(13 October 2022)</small>
@@ -130,20 +130,34 @@ file should contain the startup options as you would type them on the
 command line (`--steps=10 --grid`), one argument per line, or a
 mixture of both using any of the accepted command switch formats:

-!!! example ""
+!!! example "my unmodified initialization file"

-    ```bash
-    --web
-    --steps=28
-    --grid
-    -f 0.6 -C 11.0 -A k_euler_a
-    ```
+    ```bash title="~/.invokeai" linenums="1"
+    # InvokeAI initialization file
+    # This is the InvokeAI initialization file, which contains command-line default values.
+    # Feel free to edit. If anything goes wrong, you can re-initialize this file by deleting
+    # or renaming it and then running configure_invokeai.py again.
+
+    # The --root option below points to the folder in which InvokeAI stores its models, configs and outputs.
+    --root="/Users/mauwii/invokeai"
+
+    # the --outdir option controls the default location of image files.
+    --outdir="/Users/mauwii/invokeai/outputs"
+
+    # You may place other frequently-used startup commands here, one or more per line.
+    # Examples:
+    # --web --host=0.0.0.0
+    # --steps=20
+    # -Ak_euler_a -C10.0
+    ```

-Note that the initialization file only accepts the command line arguments.
-There are additional arguments that you can provide on the `invoke>` command
-line (such as `-n` or `--iterations`) that cannot be entered into this file.
-Also be alert for empty blank lines at the end of the file, which will cause
-an arguments error at startup time.
+!!! note
+
+    The initialization file only accepts the command line arguments.
+    There are additional arguments that you can provide on the `invoke>` command
+    line (such as `-n` or `--iterations`) that cannot be entered into this file.
+    Also be alert for empty blank lines at the end of the file, which will cause
+    an arguments error at startup time.

 ## List of prompt arguments
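Given the note's warning that trailing blank lines in the init file trigger an arguments error, a throwaway pre-flight check might look like this (illustrative; the path comes from the example above):

```bash
# a trailing blank shows up as a numbered empty line at the end
tail -n 5 ~/.invokeai | cat -n
```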
@@ -195,15 +209,17 @@ Here are the invoke> command that apply to txt2img:
 | `--with_variations <pattern>` | | `None` | Combine two or more variations. See [Variations](./VARIATIONS.md) for now to use this. |
 | `--save_intermediates <n>` | | `None` | Save the image from every nth step into an "intermediates" folder inside the output directory |

-Note that the width and height of the image must be multiples of 64. You can
-provide different values, but they will be rounded down to the nearest multiple
-of 64.
+!!! note
+
+    the width and height of the image must be multiples of 64. You can
+    provide different values, but they will be rounded down to the nearest multiple
+    of 64.

-### This is an example of img2img:
+!!! example "This is a example of img2img"

-```
-invoke> waterfall and rainbow -I./vacation-photo.png -W640 -H480 --fit
-```
+    ```bash
+    invoke> waterfall and rainbow -I./vacation-photo.png -W640 -H480 --fit
+    ```

 This will modify the indicated vacation photograph by making it more like the
 prompt. Results will vary greatly depending on what is in the image. We also ask
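The rounding rule in that note is plain integer arithmetic; a one-off illustration (not part of the docs being diffed):

```bash
W=500
echo $(( (W / 64) * 64 ))   # 448: what a width of 500 is rounded down to
```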
@@ -253,7 +269,7 @@ description of the part of the image to replace. For example, if you have an
 image of a breakfast plate with a bagel, toast and scrambled eggs, you can
 selectively mask the bagel and replace it with a piece of cake this way:

-```
+```bash
 invoke> a piece of cake -I /path/to/breakfast.png -tm bagel
 ```

@@ -265,7 +281,7 @@ are getting too much or too little masking you can adjust the threshold down (to
 get more mask), or up (to get less). In this example, by passing `-tm` a higher
 value, we are insisting on a more stringent classification.

-```
+```bash
 invoke> a piece of cake -I /path/to/breakfast.png -tm bagel 0.6
 ```

@@ -275,16 +291,16 @@ You can load and use hundreds of community-contributed Textual
 Inversion models just by typing the appropriate trigger phrase. Please
 see [Concepts Library](CONCEPTS.md) for more details.

-# Other Commands
+## Other Commands

 The CLI offers a number of commands that begin with "!".

-## Postprocessing images
+### Postprocessing images

 To postprocess a file using face restoration or upscaling, use the `!fix`
 command.

-### `!fix`
+#### `!fix`

 This command runs a post-processor on a previously-generated image. It takes a
 PNG filename or path and applies your choice of the `-U`, `-G`, or `--embiggen`

@@ -311,19 +327,19 @@ Some examples:
 [1] outputs/img-samples/000017.4829112.gfpgan-00.png: !fix "outputs/img-samples/0000045.4829112.png" -s 50 -S -W 512 -H 512 -C 7.5 -A k_lms -G 0.8
 ```

-### !mask
+#### `!mask`

 This command takes an image, a text prompt, and uses the `clipseg` algorithm to
 automatically generate a mask of the area that matches the text prompt. It is
 useful for debugging the text masking process prior to inpainting with the
 `--text_mask` argument. See [INPAINTING.md] for details.

-## Model selection and importation
+### Model selection and importation

 The CLI allows you to add new models on the fly, as well as to switch among them
 rapidly without leaving the script.

-### !models
+#### `!models`

 This prints out a list of the models defined in `config/models.yaml'. The active
 model is bold-faced

@@ -336,7 +352,7 @@ laion400m not loaded <no description>
 waifu-diffusion not loaded Waifu Diffusion v1.3
 </pre>

-### !switch <model>
+#### `!switch <model>`

 This quickly switches from one model to another without leaving the CLI script.
 `invoke.py` uses a memory caching system; once a model has been loaded,

@@ -361,7 +377,7 @@ invoke> !switch waifu-diffusion
 | Making attention of type 'vanilla' with 512 in_channels
 | Using faster float16 precision
 >> Model loaded in 18.24s
->> Max VRAM used to load the model: 2.17G
+>> Max VRAM used to load the model: 2.17G
 >> Current VRAM usage:2.17G
 >> Setting Sampler to k_lms

@@ -381,7 +397,7 @@ laion400m not loaded <no description>
 waifu-diffusion cached Waifu Diffusion v1.3
 </pre>

-### !import_model <path/to/model/weights>
+#### `!import_model <path/to/model/weights>`

 This command imports a new model weights file into InvokeAI, makes it available
 for image generation within the script, and writes out the configuration for the

@@ -428,10 +444,10 @@ OK to import [n]? <b>y</b>
 | Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
 | Making attention of type 'vanilla' with 512 in_channels
 | Using faster float16 precision
-invoke>
+invoke>
 </pre>

-###!edit_model <name_of_model>
+#### `!edit_model <name_of_model>`

 The `!edit_model` command can be used to modify a model that is already defined
 in `config/models.yaml`. Call it with the short name of the model you wish to

@@ -468,12 +484,12 @@ text... Outputs: [2] outputs/img-samples/000018.2273800735.embiggen-00.png: !fix
 "outputs/img-samples/000017.243781548.gfpgan-00.png" -s 50 -S 2273800735 -W 512
 -H 512 -C 7.5 -A k_lms --embiggen 3.0 0.75 0.25 ```

-## History processing
+### History processing

 The CLI provides a series of convenient commands for reviewing previous actions,
 retrieving them, modifying them, and re-running them.

-### !history
+#### `!history`

 The invoke script keeps track of all the commands you issue during a session,
 allowing you to re-run them. On Mac and Linux systems, it also writes the

@@ -485,20 +501,22 @@ during the session (Windows), or the most recent 1000 commands (Mac|Linux). You
 can then repeat a command by using the command `!NNN`, where "NNN" is the
 history line number. For example:

-```bash
-invoke> !history
-...
-[14] happy woman sitting under tree wearing broad hat and flowing garment
-[15] beautiful woman sitting under tree wearing broad hat and flowing garment
-[18] beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6
-[20] watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
-[21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
-...
-invoke> !20
-invoke> watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
-```
+!!! example ""

-### !fetch
+    ```bash
+    invoke> !history
+    ...
+    [14] happy woman sitting under tree wearing broad hat and flowing garment
+    [15] beautiful woman sitting under tree wearing broad hat and flowing garment
+    [18] beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6
+    [20] watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
+    [21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
+    ...
+    invoke> !20
+    invoke> watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
+    ```
+
+####`!fetch`

 This command retrieves the generation parameters from a previously generated
 image and either loads them into the command line (Linux|Mac), or prints them

@@ -508,33 +526,36 @@ a folder with image png files, and wildcard \*.png to retrieve the dream command
 used to generate the images, and save them to a file commands.txt for further
 processing.

-This example loads the generation command for a single png file:
+!!! example "load the generation command for a single png file"

-```bash
-invoke> !fetch 0000015.8929913.png
-# the script returns the next line, ready for editing and running:
-invoke> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5
-```
+    ```bash
+    invoke> !fetch 0000015.8929913.png
+    # the script returns the next line, ready for editing and running:
+    invoke> a fantastic alien landscape -W 576 -H 512 -s 60 -A plms -C 7.5
+    ```

-This one fetches the generation commands from a batch of files and stores them
-into `selected.txt`:
+!!! example "fetch the generation commands from a batch of files and store them into `selected.txt`"

-```bash
-invoke> !fetch outputs\selected-imgs\*.png selected.txt
-```
+    ```bash
+    invoke> !fetch outputs\selected-imgs\*.png selected.txt
+    ```

-### !replay
+#### `!replay`

 This command replays a text file generated by !fetch or created manually

-```
-invoke> !replay outputs\selected-imgs\selected.txt
-```
+!!! example

-Note that these commands may behave unexpectedly if given a PNG file that was
-not generated by InvokeAI.
+    ```bash
+    invoke> !replay outputs\selected-imgs\selected.txt
+    ```

-### !search <search string>
+!!! note
+
+    These commands may behave unexpectedly if given a PNG file that was
+    not generated by InvokeAI.
+
+#### `!search <search string>`

 This is similar to !history but it only returns lines that contain
 `search string`. For example:

@@ -544,7 +565,7 @@ invoke> !search surreal
 [21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194
 ```

-### `!clear`
+#### `!clear`

 This clears the search history from memory and disk. Be advised that this
 operation is irreversible and does not issue any warnings!
@ -1,130 +1,110 @@
|
||||
---
|
||||
title: The Hugging Face Concepts Library and Importing Textual Inversion files
|
||||
title: Concepts Library
|
||||
---
|
||||
|
||||
# :material-file-document: Concepts Library
|
||||
# :material-library-shelves: The Hugging Face Concepts Library and Importing Textual Inversion files
|
||||
|
||||
## Using Textual Inversion Files
|
||||
|
||||
Textual inversion (TI) files are small models that customize the output of
|
||||
Stable Diffusion image generation. They can augment SD with
|
||||
specialized subjects and artistic styles. They are also known as
|
||||
"embeds" in the machine learning world.
|
||||
Stable Diffusion image generation. They can augment SD with specialized subjects
|
||||
and artistic styles. They are also known as "embeds" in the machine learning
|
||||
world.
|
||||
|
||||
Each TI file introduces one or more vocabulary terms to the SD
|
||||
model. These are known in InvokeAI as "triggers." Triggers are often,
|
||||
but not always, denoted using angle brackets as in
|
||||
"<trigger-phrase>". The two most common type of TI files that you'll
|
||||
encounter are `.pt` and `.bin` files, which are produced by different
|
||||
TI training packages. InvokeAI supports both formats, but its [built-in
|
||||
TI training system](TEXTUAL_INVERSION.md) produces `.pt`.
|
||||
Each TI file introduces one or more vocabulary terms to the SD model. These are
|
||||
known in InvokeAI as "triggers." Triggers are often, but not always, denoted
|
||||
using angle brackets as in "<trigger-phrase>". The two most common type of
|
||||
TI files that you'll encounter are `.pt` and `.bin` files, which are produced by
|
||||
different TI training packages. InvokeAI supports both formats, but its
|
||||
[built-in TI training system](TEXTUAL_INVERSION.md) produces `.pt`.
|
||||
|
||||
The [Hugging Face company](https://huggingface.co/sd-concepts-library)
|
||||
has amassed a large ligrary of >800 community-contributed TI files
|
||||
covering a broad range of subjects and styles. InvokeAI has built-in
|
||||
support for this library which downloads and merges TI files
|
||||
automatically upon request. You can also install your own or others'
|
||||
TI files by placing them in a designated directory.
|
||||
The [Hugging Face company](https://huggingface.co/sd-concepts-library) has
|
||||
amassed a large ligrary of >800 community-contributed TI files covering a
|
||||
broad range of subjects and styles. InvokeAI has built-in support for this
|
||||
library which downloads and merges TI files automatically upon request. You can
|
||||
also install your own or others' TI files by placing them in a designated
|
||||
directory.
|
||||
|
||||
### An Example
|
||||
|
||||
Here are a few examples to illustrate how it works. All these images
|
||||
were generated using the command-line client and the Stable Diffusion
|
||||
1.5 model:
|
||||
Here are a few examples to illustrate how it works. All these images were
|
||||
generated using the command-line client and the Stable Diffusion 1.5 model:
|
||||
|
||||
Japanese gardener
|
||||
<br>
|
||||
<img src="../assets/concepts/image1.png">
|
||||
|
||||
Japanese gardener <ghibli-face>
|
||||
<br>
|
||||
<img src="../assets/concepts/image2.png">
|
||||
|
||||
Japanese gardener <hoi4-leaders>
|
||||
<br>
|
||||
<img src="../assets/concepts/image3.png">
|
||||
|
||||
Japanese gardener <cartoona-animals>
|
||||
<br>
|
||||
<img src="../assets/concepts/image4.png">
|
||||
| Japanese gardener | Japanese gardener <ghibli-face> | Japanese gardener <hoi4-leaders> | Japanese gardener <cartoona-animals> |
|
||||
| :--------------------------------: | :-----------------------------------: | :------------------------------------: | :----------------------------------------: |
|
||||
|  |  |  |  |
|
||||
|
||||
You can also combine styles and concepts:
|
||||
|
||||
A portrait of <alf> in <cartoona-animal> style
|
||||
<br>
|
||||
<img src="../assets/concepts/image5.png">
|
||||
|
||||
<figure markdown>
|
||||

|
||||
<figcaption>A portrait of <alf> in <cartoona-animal> style</figcaption>
|
||||
</figure>
|
||||
## Using a Hugging Face Concept
|
||||
|
||||
Hugging Face TI concepts are downloaded and installed automatically as
|
||||
you require them. This requires your machine to be connected to the
|
||||
Internet. To find out what each concept is for, you can browse the
|
||||
[Hugging Face concepts
|
||||
library](https://huggingface.co/sd-concepts-library) and look at
|
||||
examples of what each concept produces.
|
||||
Hugging Face TI concepts are downloaded and installed automatically as you
|
||||
require them. This requires your machine to be connected to the Internet. To
|
||||
find out what each concept is for, you can browse the
|
||||
[Hugging Face concepts library](https://huggingface.co/sd-concepts-library) and
|
||||
look at examples of what each concept produces.
|
||||
|
||||
When you have an idea of a concept you wish to try, go to the
|
||||
command-line client (CLI) and type a "<" character and the beginning
|
||||
of the Hugging Face concept name you wish to load. Press the Tab key,
|
||||
and the CLI will show you all matching concepts. You can also type "<"
|
||||
and Tab to get a listing of all ~800 concepts, but be prepared to
|
||||
scroll up to see them all! If there is more than one match you can
|
||||
continue to type and Tab until the concept is completed.
|
||||
When you have an idea of a concept you wish to try, go to the command-line
|
||||
client (CLI) and type a "<" character and the beginning of the Hugging Face
|
||||
concept name you wish to load. Press the Tab key, and the CLI will show you all
|
||||
matching concepts. You can also type "<" and Tab to get a listing of all ~800
|
||||
concepts, but be prepared to scroll up to see them all! If there is more than
|
||||
one match you can continue to type and Tab until the concept is completed.
|
||||
|
||||
For example if you type "<x" and Tab, you'll be prompted with the completions:
|
||||
For example if you type "<x" and Tab, you'll be prompted with the
|
||||
completions:
|
||||
|
||||
```
|
||||
<xatu2> <xatu> <xbh> <xi> <xidiversity> <xioboma> <xuna> <xyz>
|
||||
<xatu2> <xatu> <xbh> <xi> <xidiversity> <xioboma> <xuna> <xyz>
|
||||
```
|
||||
|
||||
Now type "id" and press Tab. It will be autocompleted to
|
||||
"<xidiversity>" because this is a unique match.
|
||||
Now type "id" and press Tab. It will be autocompleted to "<xidiversity>"
|
||||
because this is a unique match.
|
||||
|
||||
Finish your prompt and generate as usual. You may include multiple
|
||||
concept terms in the prompt.
|
||||
Finish your prompt and generate as usual. You may include multiple concept terms
|
||||
in the prompt.
|
||||
|
||||
If you have never used this concept before, you will see a message
|
||||
that the TI model is being downloaded and installed. After this, the
|
||||
concept will be saved locally (in the `models/sd-concepts-library`
|
||||
directory) for future use.
|
||||
If you have never used this concept before, you will see a message that the TI
|
||||
model is being downloaded and installed. After this, the concept will be saved
|
||||
locally (in the `models/sd-concepts-library` directory) for future use.
|
||||
|
||||
Several steps happen during downloading and
|
||||
installation, including a scan of the file for malicious code. Should
|
||||
any errors occur, you will be warned and the concept will fail to
|
||||
load. Generation will then continue treating the trigger term as a
|
||||
normal string of characters (e.g. as literal "<ghibli-face>").
|
||||
Several steps happen during downloading and installation, including a scan of
|
||||
the file for malicious code. Should any errors occur, you will be warned and the
|
||||
concept will fail to load. Generation will then continue treating the trigger
|
||||
term as a normal string of characters (e.g. as literal "<ghibli-face>").
|
||||
|
||||
Currently auto-installation of concepts is a feature only available on
|
||||
the command-line client. Support for the WebUI is a work in progress.
|
||||
Currently auto-installation of concepts is a feature only available on the
|
||||
command-line client. Support for the WebUI is a work in progress.
|
||||
|
||||
## Installing your Own TI Files
|
||||
|
||||
You may install any number of `.pt` and `.bin` files simply by copying
|
||||
them into the `embeddings` directory of the InvokeAI runtime directory
|
||||
(usually `invokeai` in your home directory). You may create
|
||||
subdirectories in order to organize the files in any way you wish. Be
|
||||
careful not to overwrite one file with another. For example, TI files
|
||||
generated by the Hugging Face toolkit share the named
|
||||
`learned_embedding.bin`. You can use subdirectories to keep them
|
||||
distinct.
|
||||
You may install any number of `.pt` and `.bin` files simply by copying them into
|
||||
the `embeddings` directory of the InvokeAI runtime directory (usually `invokeai`
|
||||
in your home directory). You may create subdirectories in order to organize the
|
||||
files in any way you wish. Be careful not to overwrite one file with another.
|
||||
For example, TI files generated by the Hugging Face toolkit share the named
|
||||
`learned_embedding.bin`. You can use subdirectories to keep them distinct.
|
||||
|
||||
At startup time, InvokeAI will scan the `embeddings` directory and
|
||||
load any TI files it finds there. At startup you will see a message
|
||||
similar to this one:
|
||||
At startup time, InvokeAI will scan the `embeddings` directory and load any TI
|
||||
files it finds there. At startup you will see a message similar to this one:
|
||||
|
||||
```
|
||||
```bash
|
||||
>> Current embedding manager terms: *, <HOI4-Leader>, <princess-knight>
|
||||
```
|

Note the `*` trigger term. This is a placeholder term that many early TI
tutorials taught people to use rather than a more descriptive term.
Unfortunately, if you have multiple TI files that all use this term, only the
first one loaded will be triggered by use of the term.

To avoid this problem, you can use the `merge_embeddings.py` script to merge two
or more TI files together. If it encounters a collision of terms, the script
will prompt you to select new terms that do not collide. See
[Textual Inversion](TEXTUAL_INVERSION.md) for details.
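
The exact flags accepted by `merge_embeddings.py` differ between InvokeAI
versions, so treat the following as a sketch and consult the built-in help
first:

```bash
# Assuming the usual argparse-style interface, --help lists the current
# options; flag names are not guaranteed across versions.
python scripts/merge_embeddings.py --help
```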

## Further Reading

@@ -12,21 +12,19 @@ stable diffusion to build the prompt on top of the image you provide, preserving
the original's basic shape and layout. To use it, provide the `--init_img`
option as shown here:

!!! example ""

    ```commandline
    tree on a hill with a river, nature photograph, national geographic -I./test-pictures/tree-and-river-sketch.png -f 0.85
    ```

    This will take the original image shown here:

    <figure markdown>
    { width=320 }
    </figure>

    and generate a new image based on it as shown here:

    | original image | generated image |
    | :------------: | :-------------: |
    | { width=320 } | { width=320 } |

The `--init_img` (`-I`) option gives the path to the seed picture. `--strength`
(`-f`) controls how much the original will be modified, ranging from `0.0` (keep

@@ -88,13 +86,15 @@ from a prompt. If the step count is 10, then the "latent space" (Stable
Diffusion's internal representation of the image) for the prompt "fire" with
seed `1592514025` develops something like this:

!!! example ""

    ```bash
    invoke> "fire" -s10 -W384 -H384 -S1592514025
    ```

    <figure markdown>
    { width=720 }
    </figure>

Put simply: starting from a frame of fuzz/static, SD finds details in each frame
that it thinks look like "fire" and brings them a little bit more into focus,

@@ -109,25 +109,23 @@ into the sequence at the appropriate point, with just the right amount of noise.

### A concrete example

!!! example "I want SD to draw a fire based on this hand-drawn image"

    { align=left }

Let's only do 10 steps, to make it easier to see what's happening. If strength
is `0.7`, this is what the internal steps the algorithm has to take will look
like:

<figure markdown>
</figure>

With strength `0.4`, the steps look more like this:

<figure markdown>
</figure>

Notice how much more fuzzy the starting image is for strength `0.7` compared to
`0.4`, and notice also how much longer the sequence is with `0.7`:
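
As a rough sketch of the arithmetic implied above (assuming img2img simply
skips the earliest denoising steps), strength multiplied by the step count
gives the number of steps actually run:

```bash
# Illustrative only -- the input image path is hypothetical. With -s10,
# a strength of 0.7 leaves roughly 10 * 0.7 = 7 denoising steps,
# while 0.4 leaves about 4.
invoke> "fire" -s10 -f0.7 -I./fire-drawing.png -S1592514025
invoke> "fire" -s10 -f0.4 -I./fire-drawing.png -S1592514025
```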

@@ -120,7 +120,7 @@ A number of caveats:
   (`--iterations`) argument.

3. Your results will be _much_ better if you use the `inpaint-1.5` model
   released by runwayML and installed by default by `scripts/configure_invokeai.py`.
   This model was trained specifically to harmoniously fill in image gaps. The
   standard model will work as well, but you may notice color discontinuities at
   the border.

@@ -28,21 +28,17 @@ should "just work" without further intervention. Simply pass the `--upscale`
the popup in the Web GUI.

**GFPGAN** requires a series of downloadable model files to work. These are
loaded when you run `scripts/configure_invokeai.py`. If GFPGAN is failing with an
error, please run the following from the InvokeAI directory:

```bash
python scripts/configure_invokeai.py
```

If you do not run this script in advance, the GFPGAN module will attempt to
download the model files the first time you try to perform facial
reconstruction.

## Usage

You will now have access to two new prompt arguments.

### Upscaling

`-U : <upscaling_factor> <upscaling_strength>`
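
As a short, illustrative sketch of the two arguments (the prompt and values
are arbitrary):

```bash
# Upscale the generated image 2x at 75% strength.
invoke> "a portrait photo of a woman" -U 2 0.75
```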

@@ -110,7 +106,7 @@ This repo also allows you to perform face restoration using
[CodeFormer](https://github.com/sczhou/CodeFormer).

In order to set up CodeFormer, you need to download the models as you do with
GFPGAN. You can do this either by running `configure_invokeai.py` or by manually
downloading the
[model file](https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth)
and saving it to the `ldm/invoke/restoration/codeformer/weights` folder.

@@ -119,7 +115,7 @@ You can use the `-ft` prompt argument to swap between CodeFormer and the default
GFPGAN. The above-mentioned `-G` prompt argument will allow you to control the
strength of the restoration effect.

### CodeFormer Usage

The following command will perform face restoration with CodeFormer instead of
the default GFPGAN.
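
The command itself lies outside this hunk; a plausible sketch, assuming only
the `-ft` and `-G` arguments described above, would be:

```bash
# Hedged sketch -- not the literal command from the full document.
invoke> "a portrait photo" -G 0.8 -ft codeformer
```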

@@ -160,7 +156,7 @@ A new file named `000044.2945021133.fixed.png` will be created in the output
directory. Note that the `!fix` command does not replace the original file,
unlike the behavior at generate time.

## How to disable

If, for some reason, you do not wish to load the GFPGAN and/or ESRGAN libraries,
you can disable them on the invoke.py command line with the `--no_restore` and

docs/features/index.md (new file)
@@ -0,0 +1,5 @@
---
title: Overview
---

Here you can find the documentation for different features.

@@ -86,6 +86,10 @@ AMD card (using the ROCm driver). For full installation and upgrade
instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)

Linux users who wish to make use of the PyPatchMatch inpainting
functions will need to perform a bit of extra work to enable this
module. Instructions can be found at [Installing PyPatchMatch](installation/INSTALL_PATCHMATCH.md).

## :fontawesome-solid-computer: Hardware Requirements

### :octicons-cpu-24: System

@@ -123,7 +127,8 @@ You will need one of the following:

- [The InvokeAI Web Interface](features/WEB.md)
- [WebGUI hotkey reference guide](features/WEBUIHOTKEYS.md)
- [WebGUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
<!-- separator -->
- [The Command Line Interface](features/CLI.md)
- [Image2Image](features/IMG2IMG.md)
- [Inpainting](features/INPAINTING.md)

@@ -136,6 +141,7 @@ You will need one of the following:
- [Prompt Engineering](features/PROMPTS.md)
<!-- separator -->
- Miscellaneous
  - [NSFW Checker](features/NSFW.md)
  - [Embiggen upscaling](features/EMBIGGEN.md)
  - [Other](features/OTHER.md)

@@ -160,7 +166,7 @@ You will need one of the following:
- You can now load
  [multiple models and switch among them quickly](https://docs.google.com/presentation/d/1WywGA1rny7bpFh7CLSdTr4nNpVKdlUeT0Bj0jCsILyU/edit?usp=sharing)
  without leaving the CLI.
- The installation process (via `scripts/configure_invokeai.py`) now lets you select
  among several popular
  [Stable Diffusion models](https://invoke-ai.github.io/InvokeAI/installation/INSTALLING_MODELS/)
  and downloads and installs them on your behalf. Among other models, this

docs/installation/BUILDING_BINARY_INSTALLERS.md (new file)
@@ -0,0 +1,89 @@
---
title: build binary installers
---

# :simple-buildkite: How to build "binary" installers (InvokeAI-mac/windows/linux_on_*.zip)

## 1. Ensure `installers/requirements.in` is correct

It must be up to date on the branch to be installed.

## <a name="step-2"></a> 2. Run `pip-compile` on each platform.

On each target platform, in the branch that is to be installed, and
inside the InvokeAI git root folder, run the following commands:

```commandline
conda activate invokeai # or however you activate python
pip install pip-tools
pip-compile --allow-unsafe --generate-hashes --output-file=binary_installer/<reqsfile>.txt binary_installer/requirements.in
```

where `<reqsfile>.txt` is whichever of

```commandline
py3.10-darwin-arm64-mps-reqs.txt
py3.10-darwin-x86_64-reqs.txt
py3.10-linux-x86_64-cuda-reqs.txt
py3.10-windows-x86_64-cuda-reqs.txt
```

matches the current OS and architecture.

> There is no way to cross-compile these. They must be done on a system matching the target OS and arch.

## <a name="step-3"></a> 3. Set github repository and branch

Once all reqs files have been collected and committed **to the branch
to be installed**, edit `binary_installer/install.sh.in` and `binary_installer/install.bat.in` so that `RELEASE_URL`
and `RELEASE_SOURCEBALL` point to the github repo and branch that is
to be installed.

For example, to install the `main` branch of `InvokeAI`, they should be
set as follows:

`install.sh.in`:

```commandline
RELEASE_URL=https://github.com/invoke-ai/InvokeAI
RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
```

`install.bat.in`:

```commandline
set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
set RELEASE_SOURCEBALL=/archive/refs/heads/main.tar.gz
```

Or, to install the `damians-cool-feature` branch of `damian0815`, set them
as follows:

`install.sh.in`:

```commandline
RELEASE_URL=https://github.com/damian0815/InvokeAI
RELEASE_SOURCEBALL=/archive/refs/heads/damians-cool-feature.tar.gz
```

`install.bat.in`:

```commandline
set RELEASE_URL=https://github.com/damian0815/InvokeAI
set RELEASE_SOURCEBALL=/archive/refs/heads/damians-cool-feature.tar.gz
```

The branch and repo specified here **must** contain the correct reqs
files. The installer zip files **do not** contain requirements files;
they are pulled from the specified branch during the installation
process.

## 4. Create zip files.

cd into the `installers/` folder and run
`./create_installers.sh`. This will create
`InvokeAI-mac_on_<branch>.zip`,
`InvokeAI-windows_on_<branch>.zip` and
`InvokeAI-linux_on_<branch>.zip`. These files can be distributed to end users.

These zips will continue to function as installers for all future
pushes to those branches, as long as necessary changes to
`requirements.in` are propagated in a timely manner to the
`py3.10-*-reqs.txt` files using pip-compile as outlined in [step
2](#step-2).

To actually install, users should unzip the appropriate zip file into an empty
folder and run `install.sh` on macOS/Linux or `install.bat` on
Windows.
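
A hedged sketch of those end-user steps (the zip name and target folder are
illustrative):

```bash
# Unzip into an empty folder; the archive contains an InvokeAI/ directory
# with the install script inside it.
mkdir invokeai-install && cd invokeai-install
unzip ~/Downloads/InvokeAI-binary-<your platform>.zip
./InvokeAI/install.sh   # or run install.bat on Windows
```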

@@ -56,7 +56,7 @@ unofficial Stable Diffusion models and where they can be obtained.

There are three ways to install weights files:

1. During InvokeAI installation, the `configure_invokeai.py` script can download
   them for you.

2. You can use the command-line interface (CLI) to import, configure and modify

@@ -65,13 +65,13 @@ There are three ways to install weights files:
3. You can download the files manually and add the appropriate entries to
   `models.yaml`.

### Installation via `configure_invokeai.py`

This is the most automatic way. Run `scripts/configure_invokeai.py` from the
console. It will ask you to select which models to download and lead you through
the steps of setting up a Hugging Face account if you haven't done so already.

To start, run `python scripts/configure_invokeai.py` from within the InvokeAI
directory:

!!! example ""

@@ -162,6 +162,12 @@ the command-line client's `!import_model` command.
   Type a bit of the path name and hit ++tab++ in order to get a choice of
   possible completions.

   !!! tip "on Windows, you can drag model files onto the command-line"

       Once you have typed in `!import_model `, you can drag the model `.ckpt` file
       onto the command-line to insert the model path. This way, you don't need to
       type it or copy/paste.

4. Follow the wizard's instructions to complete installation as shown in the
   example here:

@@ -238,7 +244,7 @@ arabian-nights-1.0:
| arabian-nights-1.0 | This is the name of the model that you will refer to from within the CLI and the WebGUI when you need to load and use the model. |
| description | Any description that you want to add to the model to remind you what it is. |
| weights | Relative path to the .ckpt weights file for this model. |
| config | This is the confusingly-named configuration file for the model itself. Use `./configs/stable-diffusion/v1-inference.yaml` unless the model happens to need a custom configuration, in which case the place you downloaded it from will tell you what to use instead. For example, the runwayML custom inpainting model requires the file `configs/stable-diffusion/v1-inpainting-inference.yaml`. This is already included in the InvokeAI distribution and is configured automatically for you by the `configure_invokeai.py` script. |
| vae | If you want to add a VAE file to the model, then enter its path here. |
| width, height | This is the width and height of the images used to train the model. Currently they are always 512 and 512. |
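
As a minimal sketch, a `models.yaml` stanza combining the fields in the table
above might look like this (every path and value here is illustrative, not a
real model):

```bash
# Hypothetical entry -- in practice you would edit configs/models.yaml
# directly in a text editor rather than appending from the shell.
cat >> configs/models.yaml <<'EOF'
arabian-nights-1.0:
  description: An illustrative custom model
  weights: models/ldm/stable-diffusion-v1/arabian-nights-1.0.ckpt
  config: configs/stable-diffusion/v1-inference.yaml
  width: 512
  height: 512
EOF
```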

@@ -1,8 +1,8 @@
---
title: InvokeAI Binary Installer
---

The InvokeAI binary installer is a shell script that will install InvokeAI onto a stock
computer running recent versions of Linux, MacOSX or Windows. It will leave you
with a version that runs a stable version of InvokeAI. When a new version of
InvokeAI is released, you will download and reinstall the new version.

@@ -36,7 +36,7 @@ recommended model weights files.

1. Download the
   [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest) of
   InvokeAI's installer for your platform. Look for a file named `InvokeAI-binary-<your platform>.zip`.

2. Place the downloaded package someplace where you have plenty of HDD space,
   and have full permissions (i.e. `~/` on Lin/Mac; your home folder on Windows).

@@ -2,7 +2,7 @@
title: Running InvokeAI on Google Colab using a Jupyter Notebook
---

# THIS DOCUMENTATION IS UNFINISHED - VOLUNTEERS GRATEFULLY ACCEPTED

## Introduction

@@ -22,6 +22,4 @@ start running the cells one-by-one.

### Updating the stable version

### Updating to the development version

## Troubleshooting

@@ -155,10 +155,10 @@ command-line completion.
process for this is described [here](INSTALLING_MODELS.md).

```bash
python scripts/configure_invokeai.py
```

The script `configure_invokeai.py` will interactively guide you through the
process of downloading and installing the weights files needed for InvokeAI.
Note that the main Stable Diffusion weights file is protected by a license
agreement that you have to agree to. The script will list the steps you need

@@ -220,7 +220,7 @@ greatest version, launch the Anaconda window, enter `InvokeAI` and type:
```bash
git pull
conda env update
python scripts/configure_invokeai.py --no-interactive # optional
```

This will bring your local copy into sync with the remote one. The last step may

@@ -359,7 +359,7 @@ brew install llvm

If brew config has Clang installed, update to the latest llvm and try creating the environment again.

#### `configure_invokeai.py` or `invoke.py` crashes at an early stage

This is usually due to an incomplete or corrupted Conda install. Make sure you
have linked to the correct environment file and run `conda update` again.

docs/installation/INSTALL_PATCHMATCH.md (new file)
@@ -0,0 +1,86 @@
---
title: Installing PyPatchMatch
---

# :octicons-paintbrush-16: Installing PyPatchMatch

pypatchmatch is a Python module for inpainting images. It is not
needed to run InvokeAI, but it greatly improves the quality of
inpainting and outpainting and is recommended.

Unfortunately, it is a C++ optimized module and installation
can be somewhat challenging. This guide leads you through the steps.

## Windows

You're in luck! PyPatchMatch will install automatically on Windows
systems with no extra intervention.

## Macintosh

PyPatchMatch is not currently supported, but the team is working on
it.

## Linux

Prior to installing PyPatchMatch, you need to take the following
steps:

1. Install the `build-essential` tools:

   ```
   sudo apt update
   sudo apt install build-essential
   ```

2. Install `opencv`:

   ```
   sudo apt install python3-opencv libopencv-dev
   ```

3. Fix the naming of the `opencv` package configuration file:

   ```
   cd /usr/lib/x86_64-linux-gnu/pkgconfig/
   ln -sf opencv4.pc opencv.pc
   ```

4. Activate the environment you use for invokeai, either with
   `conda` or with a virtual environment.

5. Do a "develop" install of pypatchmatch:

   ```
   pip install -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch
   ```

6. Confirm that pypatchmatch is installed. At the command-line
   prompt enter `python`, and then at the `>>>` line type
   `from patchmatch import patch_match`. It should look like the following:

   ```
   Python 3.9.5 (default, Nov 23 2021, 15:27:38)
   [GCC 9.3.0] on linux
   Type "help", "copyright", "credits" or "license" for more information.
   >>> from patchmatch import patch_match
   Compiling and loading c extensions from "/home/lstein/Projects/InvokeAI/.invokeai-env/src/pypatchmatch/patchmatch".
   rm -rf build/obj libpatchmatch.so
   mkdir: created directory 'build/obj'
   mkdir: created directory 'build/obj/csrc/'
   [dep] csrc/masked_image.cpp ...
   [dep] csrc/nnf.cpp ...
   [dep] csrc/inpaint.cpp ...
   [dep] csrc/pyinterface.cpp ...
   [CC] csrc/pyinterface.cpp ...
   [CC] csrc/inpaint.cpp ...
   [CC] csrc/nnf.cpp ...
   [CC] csrc/masked_image.cpp ...
   [link] libpatchmatch.so ...
   ```

If you see no errors, then you're ready to go!
|
||||
off the process.
|
||||
|
||||
1. The source installer is distributed in ZIP files. Go to the
|
||||
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
|
||||
[latest release](https://github.com/invoke-ai/InvokeAI/releases/tag/2.2.0-rc4), and
|
||||
look for a series of files named:
|
||||
|
||||
- invokeAI-src-installer-mac.zip
|
||||
@ -55,7 +55,7 @@ off the process.
|
||||
named `install.bat` on Windows systems and `install.sh` on Linux and
|
||||
Macintosh systems.
|
||||
|
||||
4. Alternatively, form the command line, run the shell script or .bat file:
|
||||
4. Alternatively, from the command line, run the shell script or .bat file:
|
||||
|
||||
```cmd
|
||||
C:\Documents\Linco> cd invokeAI
|
||||
@ -66,8 +66,17 @@ off the process.
|
||||
requirements including Conda, Git and Python, then download the current
|
||||
InvokeAI code and install it along with its dependencies.
|
||||
|
||||
Be aware that some of the library download and install steps take a long time.
|
||||
In particular, the `pytorch` package is quite large and often appears to get
|
||||
"stuck" at 99.9%. Similarly, the `pip installing requirements` step may
|
||||
appear to hang. Have patience and the installation step will eventually
|
||||
resume. However, there are occasions when the library install does
|
||||
legitimately get stuck. If you have been waiting for more than ten minutes
|
||||
and nothing is happening, you can interrupt the script with ^C. You may restart
|
||||
it and it will pick up where it left off.
|
||||
|
||||
6. After installation completes, the installer will launch a script called
|
||||
`preload_models.py`, which will guide you through the first-time process of
|
||||
`configure_invokeai.py`, which will guide you through the first-time process of
|
||||
selecting one or more Stable Diffusion model weights files, downloading and
|
||||
configuring them.
|
||||
|
||||
@ -110,6 +119,71 @@ python scripts/invoke.py --web --max_load_models=3 \
|
||||
These options are described in detail in the
|
||||
[Command-Line Interface](../features/CLI.md) documentation.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
_Package dependency conflicts_ If you have previously installed
|
||||
InvokeAI or another Stable Diffusion package, the installer may
|
||||
occasionally pick up outdated libraries and either the installer or
|
||||
`invoke` will fail with complaints out library conflicts. There are
|
||||
two steps you can take to clear this problem. Both of these are done
|
||||
from within the "developer's console", which you can get to by
|
||||
launching `invoke.sh` (or `invoke.bat`) and selecting launch option
|
||||
#3:
|
||||
|
||||
1. Remove the previous `invokeai` environment completely. From within
|
||||
the developer's console, give the command `conda env remove -n
|
||||
invokeai`. This will delete previous files installed by `invoke`.
|
||||
|
||||
Then exit from the developer's console and launch the script
|
||||
`update.sh` (or `update.bat`). This will download the most recent
|
||||
InvokeAI (including bug fixes) and reinstall the environment.
|
||||
You should then be able to run `invoke.sh`/`invoke.bat`.
|
||||
|
||||
2. If this doesn't work, you can try cleaning your system's conda
|
||||
cache. This is slightly more extreme, but won't interfere with
|
||||
any other python-based programs installed on your computer.
|
||||
From the developer's console, run the command `conda clean -a`
|
||||
and answer "yes" to all prompts.
|
||||
|
||||
After this is done, run `update.sh` and try again as before.
|
||||
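
For reference, the two recovery commands described above, both run from the
developer's console, are:

```bash
conda env remove -n invokeai   # step 1: delete the previous environment
conda clean -a                 # step 2 (if needed): clear conda's package cache
```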

_"Corrupted configuration file."_ Everything seems to install OK, but
`invoke` complains of a corrupted configuration file and calls
`configure_invokeai.py` to fix it, but this doesn't fix the problem.

This issue is often caused by a misconfigured configuration directive
in the `.invokeai` initialization file that contains startup settings.
This can be corrected by fixing the offending line.

First find `.invokeai`. It is a small text file located in your home
directory, `~/.invokeai` on Mac and Linux systems, and `C:\Users\*your
name*\.invokeai` on Windows systems. Open it with a text editor
(e.g. Notepad on Windows, TextEdit on Macs, or `nano` on Linux)
and look for the lines starting with `--root` and `--outdir`.

An example is here:

```cmd
--root="/home/lstein/invokeai"
--outdir="/home/lstein/invokeai/outputs"
```

There should not be whitespace before or after the directory paths,
and the paths should not end with slashes:

```cmd
--root="/home/lstein/invokeai " # wrong! no whitespace here
--root="/home\lstein\invokeai\" # wrong! shouldn't end in a slash
```

Fix the problem with your text editor and save as a **plain text**
file. This should clear the issue.

_If none of these maneuvers fixes the problem_ then please report the
problem to the [InvokeAI
Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive assistance.

## Updating to newer versions

This section describes how to update InvokeAI to new versions of the software.

@@ -119,31 +193,15 @@ This section describes how to update InvokeAI to new versions of the software.
This distribution is changing rapidly, and we add new features on a daily basis.
To update to the latest released version (recommended), run the `update.sh`
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
release and re-run the `configure_invokeai` script to download any updated model
files that may be needed. You can also use this to add additional models that
you did not select at installation time.

### Updating to the development version

There may be times that there is a feature in the `development` branch of
InvokeAI that you'd like to take advantage of. Or perhaps there is a branch that
corrects an annoying bug. To do this, you will use the developer's console.

From within the invokeAI directory, run the command `invoke.sh` (Linux/Mac) or
`invoke.bat` (Windows) and select option (3) to open the developer's console.
Then run the following commands to get the `development` branch:

```bash
git checkout development
git pull
conda env update
```

You can now close the developer console and run `invoke` as before. If you get
complaints about missing models, then you may need to do the additional step of
running `configure_invokeai.py`. This happens relatively infrequently. To do this,
simply open up the developer's console again and type
`python scripts/configure_invokeai.py`.

## Troubleshooting
|
@ -5,37 +5,26 @@ title: Overview
|
||||
We offer several ways to install InvokeAI, each one suited to your
|
||||
experience and preferences.
|
||||
|
||||
1. [InvokeAI installer](INSTALL_INVOKE.md)
|
||||
1. [InvokeAI source code installer](INSTALL_SOURCE.md)
|
||||
|
||||
This is a installer script that installs InvokeAI and all the
|
||||
third party libraries it depends on. When a new version of
|
||||
InvokeAI is released, you will download and reinstall the new
|
||||
version.
|
||||
This is a script that will install Python, the Anaconda ("conda")
|
||||
package manager, all of InvokeAI's its essential third party
|
||||
libraries and InvokeAI itself. It includes access to a "developer
|
||||
console" which will help us debug problems with you and give you
|
||||
to access experimental features.
|
||||
|
||||
This installer is designed for people who want the system to "just
|
||||
work", don't have an interest in tinkering with it, and do not
|
||||
care about upgrading to unreleased experimental features.
|
||||
When a new InvokeAI feature is available, even between releases,
|
||||
you will be able to upgrade and try it out by running an `update`
|
||||
script. This method is recommended for individuals who wish to
|
||||
stay on the cutting edge of InvokeAI development and are not
|
||||
afraid of occasional breakage.
|
||||
|
||||
**Important Caveats**
|
||||
- This script does not support AMD GPUs. For Linux AMD support,
|
||||
please use the manual or source code installer methods.
|
||||
- This script has difficulty on some Macintosh machines
|
||||
that have previously been used for Python development due to
|
||||
conflicting development tools versions. Mac developers may wish
|
||||
to try the source code installer or one of the manual methods instead.
|
||||
- This script is a bit cranky and occasionally hangs or times out,
|
||||
forcing you to cancel and restart the script (it will pick up where
|
||||
it left off).
|
||||
|
||||
2. [Source code installer](INSTALL_SOURCE.md)
|
||||
|
||||
This is a script that will install InvokeAI and all its essential
|
||||
third party libraries. In contrast to the previous installer, it
|
||||
includes access to a "developer console" which will allow you to
|
||||
access experimental features on the development branch.
|
||||
|
||||
This method is recommended for individuals who are wish to stay
|
||||
on the cutting edge of InvokeAI development and are not afraid
|
||||
of occasional breakage.
|
||||
|
||||
3. [Manual Installation](INSTALL_MANUAL.md)
|
||||
2. [Manual Installation](INSTALL_MANUAL.md)
|
||||
|
||||
In this method you will manually run the commands needed to install
|
||||
InvokeAI and its dependencies. We offer two recipes: one suited to
|
||||
@ -47,14 +36,14 @@ experience and preferences.
|
||||
the cutting edge of future InvokeAI development and is willing to put
|
||||
up with occasional glitches and breakage.
|
||||
|
||||
4. [Docker Installation](INSTALL_DOCKER.md)
|
||||
3. [Docker Installation](INSTALL_DOCKER.md)
|
||||
|
||||
We also offer a method for creating Docker containers containing
|
||||
InvokeAI and its dependencies. This method is recommended for
|
||||
individuals with experience with Docker containers and understand
|
||||
the pluses and minuses of a container-based install.
|
||||
|
||||
5. [Jupyter Notebooks Installation](INSTALL_JUPYTER.md)
|
||||
4. [Jupyter Notebooks Installation](INSTALL_JUPYTER.md)
|
||||
|
||||
This method is suitable for running InvokeAI on a Google Colab
|
||||
account. It is recommended for individuals who have previously
|
||||
|

@@ -69,7 +69,7 @@ title: Manual Installation, Linux
   machine-learning models:

   ```bash
   (invokeai) ~/InvokeAI$ python3 scripts/configure_invokeai.py
   ```

   !!! note

@@ -79,7 +79,7 @@ title: Manual Installation, Linux
   and obtaining an access token for downloading. It will then download and
   install the weights files for you.

   Please look [here](../INSTALL_MANUAL.md) for a manual process for doing
   the same thing.

7. Start generating images!

@@ -112,7 +112,7 @@ title: Manual Installation, Linux
   To use an alternative model you may invoke the `!switch` command in
   the CLI, or pass `--model <model_name>` during `invoke.py` launch for
   either the CLI or the Web UI. See [Command Line
   Client](../../features/CLI.md#model-selection-and-importation). The
   model names are defined in `configs/models.yaml`.
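
A quick sketch of both routes (the model name is illustrative and must already
be defined in your `configs/models.yaml`):

```bash
invoke> !switch waifu-diffusion                     # switch from inside the CLI
python scripts/invoke.py --model waifu-diffusion    # or select one at launch
```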

8. Subsequently, to relaunch the script, be sure to run "conda activate

@@ -111,7 +111,7 @@ will do our best to help.

!!! todo "Download the model weight files"

    The `configure_invokeai.py` script downloads and installs the model weight
    files for you. It will lead you through the process of getting a Hugging Face
    account, accepting the Stable Diffusion model weight license agreement, and
    creating a download token:

@@ -119,7 +119,7 @@ will do our best to help.
    ```bash
    # This will take some time, depending on the speed of your internet connection
    # and will consume about 10GB of space
    python scripts/configure_invokeai.py
    ```

!!! todo "Run InvokeAI!"

@@ -150,7 +150,7 @@ will do our best to help.
    To use an alternative model you may invoke the `!switch` command in
    the CLI, or pass `--model <model_name>` during `invoke.py` launch for
    either the CLI or the Web UI. See [Command Line
    Client](../../features/CLI.md#model-selection-and-importation). The
    model names are defined in `configs/models.yaml`.

---

@@ -220,8 +220,8 @@ There are several causes of these errors:
   with "(invokeai)" then you activated it. If it begins with "(base)" or
   something else you haven't.

2. You might've run `./scripts/configure_invokeai.py` or `./scripts/invoke.py`
   instead of `python ./scripts/configure_invokeai.py` or
   `python ./scripts/invoke.py`. The cause of this error is long so it's below.

<!-- I could not find out where the error is, otherwise would have marked it as a footnote -->

@@ -359,7 +359,7 @@ python ./scripts/txt2img.py \
### OSError: Can't load tokenizer for 'openai/clip-vit-large-patch14'

```bash
python scripts/configure_invokeai.py
```

---

@@ -65,7 +65,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehand
7. Load the big stable diffusion weights files and a couple of smaller machine-learning models:

   ```bash
   python scripts/configure_invokeai.py
   ```

   !!! note

@@ -75,7 +75,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehand
   obtaining an access token for downloading. It will then download and install the
   weights files for you.

   Please look [here](../INSTALL_MANUAL.md) for a manual process for doing the
   same thing.

8. Start generating images!

@@ -108,7 +108,7 @@ Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehand
   To use an alternative model you may invoke the `!switch` command in
   the CLI, or pass `--model <model_name>` during `invoke.py` launch for
   either the CLI or the Web UI. See [Command Line
   Client](../../features/CLI.md#model-selection-and-importation). The
   model names are defined in `configs/models.yaml`.

9. Subsequently, to relaunch the script, first activate the Anaconda

@@ -15,16 +15,16 @@ We thank them for all of their time and hard work.

## **Current core team**

* @lstein (Lincoln Stein) - Co-maintainer
* @blessedcoolant - Co-maintainer
* @hipsterusername (Kent Keirsey) - Product Manager
* @psychedelicious - Web Team Leader
* @Kyle0654 (Kyle Schouviller) - Node Architect and General Backend Wizard
* @damian0815 - Attention Systems and Gameplay Engineer
* @mauwii (Matthias Wild) - Continuous integration and product maintenance engineer
* @Netsvetaev (Artur Netsvetaev) - UI/UX Developer
* @tildebyte - general gadfly and resident (self-appointed) know-it-all
* @keturn - Lead for Diffusers port

## **Contributions by**

File diff suppressed because one or more lines are too long

frontend/dist/index.html (vendored)
@@ -6,7 +6,7 @@
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>InvokeAI - A Stable Diffusion Toolkit</title>
    <link rel="shortcut icon" type="icon" href="./assets/favicon.0d253ced.ico" />
    <script type="module" crossorigin src="./assets/index.637f12bd.js"></script>
    <link rel="stylesheet" href="./assets/index.c609c0c8.css">
  </head>

@@ -42,7 +42,6 @@ const makeSocketIOEmitters = (
        options: optionsState,
        system: systemState,
        canvas: canvasState,
      } = state;

      const frontendToBackendParametersConfig: FrontendToBackendParametersConfig =

@@ -55,13 +54,6 @@ const makeSocketIOEmitters = (
      dispatch(generationRequested());

      const { generationParameters, esrganParameters, facetoolParameters } =
        frontendToBackendParameters(frontendToBackendParametersConfig);

@@ -30,13 +30,7 @@ export const frontendToBackendParameters = (
): { [key: string]: any } => {
  const canvasBaseLayer = getCanvasBaseLayer();

  const { generationMode, optionsState, canvasState, systemState } = config;

  const {
    cfgScale,

@@ -164,7 +158,6 @@ export const frontendToBackendParameters = (
  generationParameters.fit = false;

  generationParameters.strength = img2imgStrength;

  generationParameters.invert_mask = shouldPreserveMaskedArea;

@@ -36,7 +36,7 @@ class Concepts(object):
            models = self.hf_api.list_models(filter=ModelFilter(model_name='sd-concepts-library/'))
            self.concept_list = [a.id.split('/')[1] for a in models]
        except Exception as e:
            print(f' ** WARNING: Hugging Face textual inversion concepts libraries could not be loaded. The error was {str(e)}.')
            print(' ** You may load .bin and .pt file(s) manually using the --embedding_directory argument.')
        return self.concept_list

@@ -27,7 +27,7 @@ if Globals.try_patchmatch:
        print('>> Patchmatch initialized')
        infill_methods.append('patchmatch')
    else:
        print('>> Patchmatch not loaded (nonfatal)')
else:
    print('>> Patchmatch loading disabled')

@@ -101,7 +101,8 @@ class Completer(object):
        self.linebuffer = None
        self.auto_history_active = True
        self.extensions = None
        self.concepts = None
        self.embedding_terms = set()
        return

    def complete(self, text, state):

@@ -270,16 +271,21 @@ class Completer(object):
        return matches

    def add_embedding_terms(self, terms:list[str]):
        self.embedding_terms = set(terms)
        if self.concepts:
            self.embedding_terms.update(self.concepts)

    def _concept_completions(self, text, state):
        if self.concepts is None:
            self.concepts = set(Concepts().list_concepts())
            self.embedding_terms.update(self.concepts)

        partial = text[1:] # this removes the leading '<'
        if len(partial) == 0:
            return list(self.embedding_terms) # whole dump - think if user wants this!

        matches = list()
        for concept in self.embedding_terms:
            if concept.startswith(partial):
                matches.append(f'<{concept}>')
        matches.sort()

@@ -416,7 +422,11 @@ def get_completer(opt:Args, models=[])->Completer:
    readline.parse_and_bind('set skip-completed-text on')
    readline.parse_and_bind('set show-all-if-ambiguous on')

    outdir = os.path.expanduser(opt.outdir)
    if os.path.isabs(outdir):
        histfile = os.path.join(outdir,'.invoke_history')
    else:
        histfile = os.path.join(Globals.root, outdir, '.invoke_history')
    try:
        readline.read_history_file(histfile)
        readline.set_history_length(1000)

@@ -193,7 +193,7 @@ def mkdir_and_rename(path):
    if os.path.exists(path):
        new_name = path + '_archived_' + get_timestamp()
        print('Path already exists. Rename it to [{:s}]'.format(new_name))
        os.replace(path, new_name)
    os.makedirs(path)

@@ -74,8 +74,8 @@
    "#@title 3. Install dependencies\n",
    "import gc\n",
    "\n",
    "!wget https://raw.githubusercontent.com/invoke-ai/InvokeAI/development/environments-and-requirements/requirements-base.txt\n",
    "!wget https://raw.githubusercontent.com/invoke-ai/InvokeAI/development/environments-and-requirements/requirements-win-colab-cuda.txt\n",
    "!pip install colab-xterm\n",
    "!pip install -r requirements-lin-win-colab-CUDA.txt\n",
    "!pip install clean-fid torchtext\n",

@@ -262,17 +262,17 @@
  },
  "gpuClass": "standard",
  "kernelspec": {
    "display_name": "Python 3.9.12 64-bit",
    "language": "python",
    "name": "python3"
  },
  "language_info": {
    "name": "python",
    "version": "3.9.12"
  },
  "vscode": {
    "interpreter": {
      "hash": "4e870c5c5fe42db7e2c5647ae5af656ff3391bf8c2b729cbf7fa0e16ca8cb5af"
    }
  }
},

scripts/configure_invokeai.py (mode changed: Normal file → Executable file)
@@ -108,11 +108,13 @@ completely skip this step.
    completer.complete_extensions(None) # turn off path-completion mode
    selection = None
    while selection is None:
        choice = input('Download <r>ecommended models, <a>ll models, <c>ustomized list, or <s>kip this step? [r]: ')
        if choice.startswith(('r','R')) or len(choice)==0:
            selection = 'recommended'
        elif choice.startswith(('c','C')):
            selection = 'customized'
        elif choice.startswith(('a','A')):
            selection = 'all'
        elif choice.startswith(('s','S')):
            selection = 'skip'
    return selection

@@ -166,7 +168,14 @@ def recommended_datasets()->dict:
        if Datasets[ds]['recommended']:
            datasets[ds]=True
    return datasets

#---------------------------------------------
def all_datasets()->dict:
    datasets = dict()
    for ds in Datasets.keys():
        datasets[ds]=True
    return datasets

#-------------------------------Authenticate against Hugging Face
def authenticate():
    print('''

@@ -217,7 +226,9 @@ This involves a few easy steps.
(You can enter anything you like in the token creation field marked "Name".
"Role" should be "read").

Now copy the token to your clipboard and paste it at the prompt. Windows
users can paste with right-click.
Token: '''
    )
    access_token = getpass_asterisk.getpass_asterisk()
    return access_token

@@ -526,6 +537,8 @@ def download_weights(opt:dict):

    if choice == 'recommended':
        models = recommended_datasets()
    elif choice == 'all':
        models = all_datasets()
    elif choice == 'customized':
        models = select_datasets(choice)
        if models is None and yes_or_no('Quit?',default_yes=False):

@@ -571,7 +584,8 @@ def select_root(root:str, yes_to_all:bool=False):
    completer.set_default_dir(default)
    completer.complete_extensions(())
    completer.set_line(default)
    directory = input(f"Select a directory in which to install InvokeAI's models and configuration files [{default}]: ").strip(' \\')
    return directory or default

#-------------------------------------
def select_outputs(root:str,yes_to_all:bool=False):

@@ -581,7 +595,8 @@ def select_outputs(root:str,yes_to_all:bool=False):
    completer.set_default_dir(os.path.expanduser('~'))
    completer.complete_extensions(())
    completer.set_line(default)
    directory = input(f'Select the default directory for image outputs [{default}]: ').strip(' \\')
    return directory or default

#-------------------------------------
def initialize_rootdir(root:str,yes_to_all:bool=False):

source_installer/WinLongPathsEnabled.reg (new binary file; contents not shown)

@@ -2,22 +2,26 @@

cd "$(dirname "${BASH_SOURCE[0]}")"

VERSION='2.2.3'

# make the installer zip for linux and mac
rm -rf invokeAI
mkdir -p invokeAI
cp install.sh.in invokeAI/install.sh
chmod a+x invokeAI/install.sh
cp readme.txt invokeAI

zip -r invokeAI-src-installer-$VERSION-linux.zip invokeAI
zip -r invokeAI-src-installer-$VERSION-mac.zip invokeAI

# make the installer zip for windows
rm -rf invokeAI
mkdir -p invokeAI
cp install.bat.in invokeAI/install.bat
cp readme.txt invokeAI
cp WinLongPathsEnabled.reg invokeAI

zip -r invokeAI-src-installer-$VERSION-windows.zip invokeAI

rm -rf invokeAI
echo "The installer zips are ready to be distributed."
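
A quick sketch of how this script is typically run (assuming it lives in
`source_installer/`; the output names follow the `VERSION` variable above):

```bash
cd source_installer
./create_installers.sh
# produces invokeAI-src-installer-2.2.3-linux.zip, -mac.zip and -windows.zip
```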

@@ -5,12 +5,20 @@
@rem For users who already have git and conda, this step will be skipped.

@rem Next, it'll checkout the project's git repo, if necessary.
@rem Finally, it'll create the conda environment and configure InvokeAI.

@rem This enables a user to install this project without manually installing conda and git.

@rem change to the script's directory
PUSHD "%~dp0"

echo "InvokeAI source installer..."
echo ""
echo "Some of the installation steps take a long time to run. Please be patient."
echo "If the script appears to hang for more than 10 minutes, please interrupt with control-C and retry."
echo "<Press any key to start the install process>"
pause
echo ""

@rem config
set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba

@@ -84,23 +92,23 @@ copy environments-and-requirements\environment-win-cuda.yml environment.yml
call conda env create
if "%ERRORLEVEL%" NEQ "0" (
    echo ""
    echo "Something went wrong while installing Python libraries and cannot continue."
    echo "See https://invoke-ai.github.io/InvokeAI/INSTALL_SOURCE#troubleshooting for troubleshooting"
    echo "tips, or visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
    echo "installation methods"
    pause
    exit /b
)

copy source_installer\invoke.bat.in .\invoke.bat
copy source_installer\update.bat.in .\update.bat

call conda activate invokeai
@rem call configure script
call python scripts\configure_invokeai.py
if "%ERRORLEVEL%" NEQ "0" (
    echo ""
    echo "The configure script crashed or was cancelled."
    echo "InvokeAI is not ready to run. To run configure_invokeai.py again,"
    echo "run the command 'update.bat' in this directory."
    echo "Press any key to continue"

@@ -114,5 +122,6 @@ echo "* InvokeAI installed successfully *"
echo "You can now start generating images by double-clicking the 'invoke.bat' file (inside this folder)"
echo "Press any key to continue"
pause
exit /b

@@ -1,17 +1,21 @@
#!/usr/bin/env bash

# This script will install git and conda (if not found on the PATH variable)
# using micromamba (an 8mb static-linked single-file binary, conda replacement).
# For users who already have git and conda, this step will be skipped.

# Next, it'll checkout the project's git repo, if necessary.
# Finally, it'll create the conda environment and configure InvokeAI.

# This enables a user to install this project without manually installing conda and git.

cd "$(dirname "${BASH_SOURCE[0]}")"

echo "InvokeAI source installer..."
echo ""
echo "Some of the installation steps take a long time to run. Please be patient."
echo "If the script appears to hang for more than 10 minutes, please interrupt with control-C and retry."
read -n 1 -s -r -p "<Press any key to start the install>"
echo ""

OS_NAME=$(uname -s)

@@ -112,21 +116,24 @@ status=$?

if test $status -ne 0
then
    echo "Something went wrong while installing Python libraries and cannot continue."
    echo "See https://invoke-ai.github.io/InvokeAI/INSTALL_SOURCE#troubleshooting for troubleshooting"
    echo "tips, or visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
    echo "installation methods"
else
    ln -sf ./source_installer/invoke.sh.in ./invoke.sh
    ln -sf ./source_installer/update.sh.in ./update.sh
    chmod a+rx ./source_installer/invoke.sh.in
    chmod a+rx ./source_installer/update.sh.in

    conda activate invokeai
    # configure
    echo "Calling the configure_invokeai script"
    python scripts/configure_invokeai.py
    status=$?
    if test $status -ne 0
    then
        echo "The configure_invokeai.py script crashed or was cancelled."
        echo "InvokeAI is not ready to run. Try again by running"
        echo "update.sh in this directory."
    else

@@ -10,6 +10,11 @@ source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # otherwise conda complains about
conda activate invokeai

# set required env var for torch on mac MPS
if [ "$(uname -s)" == "Darwin" ]; then
    export PYTORCH_ENABLE_MPS_FALLBACK=1
fi

if [ "$0" != "bash" ]; then
    echo "Do you want to generate images using the"
    echo "1. command-line"
Block a user