mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
60eea09629
This has repeatedly shown itself useful in fixing install issues, especially regarding pytorch CPU/GPU version, so there is little downside to making this the default. Performance impact of this should be negligible. Packages will be reinstalled from pip cache if possible, and downloaded only if necessary. Impact may be felt on slower disks.
388 lines
13 KiB
Python
388 lines
13 KiB
Python
# Copyright (c) 2023 Eugene Brodsky (https://github.com/ebr)
|
|
"""
|
|
InvokeAI installer script
|
|
"""
|
|
|
|
import os
|
|
import platform
|
|
import shutil
|
|
import subprocess
|
|
import sys
|
|
import venv
|
|
from pathlib import Path
|
|
from tempfile import TemporaryDirectory
|
|
from typing import Optional, Tuple
|
|
|
|
# Python versions the installer supports (inclusive range).
SUPPORTED_PYTHON = ">=3.10.0,<=3.11.100"
# Packages the installer itself needs at install time; installed into a
# throwaway bootstrap virtualenv, not into the application venv.
INSTALLER_REQS = ["rich", "semver", "requests", "plumbum", "prompt-toolkit"]
# Directory-name prefix for the temporary bootstrap virtualenv.
BOOTSTRAP_VENV_PREFIX = "invokeai-installer-tmp"

# Host platform identifiers, e.g. "Windows"/"Linux"/"Darwin" and "x86_64"/"arm64".
OS = platform.uname().system
ARCH = platform.uname().machine
# Default InvokeAI version to install when none is specified.
VERSION = "latest"
|
|
|
|
class Installer:
    """
    Deploys an InvokeAI installation into a given path.

    Constructing an Installer immediately bootstraps a temporary virtual
    environment containing the packages the installer itself needs
    (see INSTALLER_REQS).
    """

    def __init__(self) -> None:
        self.reqs = INSTALLER_REQS
        # Refuse to run inside an already-activated venv: the installer
        # creates and manages its own environments.
        if os.getenv("VIRTUAL_ENV") is not None:
            print("A virtual environment is already activated. Please 'deactivate' before installation.")
            sys.exit(-1)
        self.bootstrap()

    def mktemp_venv(self) -> TemporaryDirectory:
        """
        Creates a temporary virtual environment for the installer itself.

        :return: path to the created virtual environment directory
        :rtype: TemporaryDirectory
        """

        # Cleaning up temporary directories on Windows results in a race condition
        # and a stack trace.
        # `ignore_cleanup_errors` was only added in Python 3.10
        if OS == "Windows" and int(platform.python_version_tuple()[1]) >= 10:
            venv_dir = TemporaryDirectory(prefix=BOOTSTRAP_VENV_PREFIX, ignore_cleanup_errors=True)
        else:
            venv_dir = TemporaryDirectory(prefix=BOOTSTRAP_VENV_PREFIX)

        venv.create(venv_dir.name, with_pip=True)
        self.venv_dir = venv_dir
        # Make the bootstrap venv's site-packages importable in-process.
        set_sys_path(Path(venv_dir.name))

        return venv_dir

    def bootstrap(self, verbose: bool = False) -> TemporaryDirectory | None:
        """
        Bootstrap the installer venv with packages required at install time.

        :param verbose: print pip's output when True
        :type verbose: bool
        :return: path to the virtual environment directory that was bootstrapped
        :rtype: TemporaryDirectory
        """

        print("Initializing the installer. This may take a minute - please wait...")

        venv_dir = self.mktemp_venv()
        pip = get_pip_from_venv(Path(venv_dir.name))

        cmd = [pip, "install", "--require-virtualenv", "--use-pep517"]
        cmd.extend(self.reqs)

        try:
            res = subprocess.check_output(cmd).decode()
            if verbose:
                print(res)
            return venv_dir
        except subprocess.CalledProcessError as e:
            # Without the bootstrap packages the rest of the installer cannot
            # run (install() imports them), so fail fast with a clear message
            # instead of crashing later with a confusing ImportError.
            print(e)
            print("Could not install the packages required by the installer. Installation cancelled.")
            sys.exit(1)

    def app_venv(self, path: Optional[str] = None) -> Path:
        """
        Create a virtualenv for the InvokeAI installation.

        :param path: explicit venv location; defaults to `<dest>/.venv`.
            Currently unused in normal operation - useful for testing or special cases.
        :type path: str
        :return: path of the created venv directory
        :rtype: Path
        """

        venv_dir = Path(path) if path is not None else self.dest / ".venv"

        # Prefer to copy python executables
        # so that updates to system python don't break InvokeAI
        try:
            venv.create(venv_dir, with_pip=True)
        # If installing over an existing environment previously created with symlinks,
        # the executables will fail to copy. Keep symlinks in that case
        except shutil.SameFileError:
            venv.create(venv_dir, with_pip=True, symlinks=True)

        return venv_dir

    def install(
        self, root: str = "~/invokeai", version: str = "latest", yes_to_all=False, find_links: Optional[Path] = None
    ) -> None:
        """
        Install the InvokeAI application into the given runtime path.

        :param root: Destination path for the installation
        :type root: str
        :param version: InvokeAI version to install
        :type version: str
        :param yes_to_all: Accept defaults to all questions
        :type yes_to_all: bool
        :param find_links: A local directory to search for requirement wheels before going to remote indexes
        :type find_links: Path
        """

        # Deferred import: `messages` depends on packages installed by bootstrap().
        import messages

        messages.welcome()

        # The INVOKEAI_ROOT environment variable takes precedence over `root`.
        default_path = Path(os.environ.get("INVOKEAI_ROOT") or Path(root).expanduser().resolve())

        self.dest = default_path if yes_to_all else messages.dest_path(root)
        if self.dest is None:
            print("Could not find or create the destination directory. Installation cancelled.")
            sys.exit(0)

        # create the venv for the app
        self.venv = self.app_venv()

        self.instance = InvokeAiInstance(runtime=self.dest, venv=self.venv, version=version)

        # install dependencies and the InvokeAI application
        (extra_index_url, optional_modules) = get_torch_source() if not yes_to_all else (None, None)
        self.instance.install(
            extra_index_url,
            optional_modules,
            find_links,
        )

        # install the launch/update scripts into the runtime directory
        self.instance.install_user_scripts()

        # run through the configuration flow
        self.instance.configure()
|
|
class InvokeAiInstance:
    """
    Manages an installed instance of InvokeAI, comprising a virtual environment and a runtime directory.
    The virtual environment *may* reside within the runtime directory.
    A single runtime directory *may* be shared by multiple virtual environments, though this isn't currently tested or supported.
    """

    def __init__(self, runtime: Path, venv: Path, version: str) -> None:
        self.runtime = runtime
        self.venv = venv
        self.pip = get_pip_from_venv(venv)
        self.version = version

        # Make the app venv importable in-process, and advertise the
        # runtime/venv locations to child processes via the environment.
        set_sys_path(venv)
        os.environ["INVOKEAI_ROOT"] = str(self.runtime.expanduser().resolve())
        os.environ["VIRTUAL_ENV"] = str(self.venv.expanduser().resolve())

    def get(self) -> tuple[Path, Path]:
        """
        Get the locations of the runtime and virtualenv directories for this installation.

        :return: Paths of the runtime and the venv directory
        :rtype: tuple[Path, Path]
        """

        return (self.runtime, self.venv)

    def install(self, extra_index_url=None, optional_modules=None, find_links=None):
        """
        Install this instance, including dependencies and the app itself.

        :param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
        :type extra_index_url: str
        :param optional_modules: optional modules to install using "[module1,module2]" format.
        :type optional_modules: str
        :param find_links: path to a directory containing wheels to be searched prior to going to the internet
        :type find_links: Path
        """

        import messages

        messages.simple_banner("Installing the InvokeAI Application :art:")
        self.install_app(extra_index_url, optional_modules, find_links)

    def install_app(self, extra_index_url=None, optional_modules=None, find_links=None):
        """
        Install the application with pip.
        Supports installation from PyPi or from a local source directory.

        :param extra_index_url: the "--extra-index-url ..." line for pip to look in extra indexes.
        :type extra_index_url: str

        :param optional_modules: optional modules to install using "[module1,module2]" format.
        :type optional_modules: str

        :param find_links: path to a directory containing wheels to be searched prior to going to the internet
        :type find_links: Path
        """

        ## this only applies to pypi installs; TODO actually use this
        if self.version == "pre":
            version = None
            pre = "--pre"
        else:
            version = self.version
            pre = None

        src = f"invokeai=={version}" if version is not None else "invokeai"

        from plumbum import FG, local  # type: ignore

        pip = local[self.pip]

        # NOTE(review): the optional flags below are passed as None when unset;
        # this relies on plumbum tolerating None arguments - confirm against
        # plumbum's command documentation before changing.
        _ = (
            pip[
                "install",
                "--require-virtualenv",
                "--force-reinstall",
                "--use-pep517",
                str(src) + (optional_modules if optional_modules else ""),
                "--find-links" if find_links is not None else None,
                find_links,
                "--extra-index-url" if extra_index_url is not None else None,
                extra_index_url,
                pre,
            ]
            & FG
        )

    def configure(self):
        """
        Configure the InvokeAI runtime directory.
        """

        auto_install = False
        # set sys.argv to a consistent state: keep only -r/--root (and its
        # value) for the config script, and capture -y/--yes locally
        new_argv = [sys.argv[0]]
        for i in range(1, len(sys.argv)):
            el = sys.argv[i]
            if el in ["-r", "--root"]:
                new_argv.append(el)
                # guard against a trailing `-r` with no value, which would
                # otherwise raise IndexError
                if i + 1 < len(sys.argv):
                    new_argv.append(sys.argv[i + 1])
            elif el in ["-y", "--yes", "--yes-to-all"]:
                auto_install = True
        sys.argv = new_argv

        import messages
        import requests  # to catch download exceptions

        auto_install = auto_install or messages.user_wants_auto_configuration()
        if auto_install:
            sys.argv.append("--yes")
        else:
            messages.introduction()

        from invokeai.frontend.install.invokeai_configure import invokeai_configure

        # NOTE: currently the config script does its own arg parsing! this means the command-line switches
        # from the installer will also automatically propagate down to the config script.
        # this may change in the future with config refactoring!
        succeeded = False
        try:
            invokeai_configure()
            succeeded = True
        except requests.exceptions.ConnectionError as e:
            print(f"\nA network error was encountered during configuration and download: {str(e)}")
        except OSError as e:
            print(f"\nAn OS error was encountered during configuration and download: {str(e)}")
        except Exception as e:
            print(f"\nA problem was encountered during the configuration and download steps: {str(e)}")
        finally:
            if not succeeded:
                print('To try again, find the "invokeai" directory, run the script "invoke.sh" or "invoke.bat"')
                print("and choose option 7 to fix a broken install, optionally followed by option 5 to install models.")
                print("Alternatively you can relaunch the installer.")

    def install_user_scripts(self):
        """
        Copy the launch and update scripts to the runtime dir.
        """

        ext = "bat" if OS == "Windows" else "sh"

        scripts = ["invoke"]

        for script in scripts:
            src = Path(__file__).parent / ".." / "templates" / f"{script}.{ext}.in"
            dest = self.runtime / f"{script}.{ext}"
            shutil.copy(src, dest)
            # make the script executable (mode bits are a no-op on Windows)
            os.chmod(dest, 0o0755)

    def update(self):
        # Not implemented yet.
        pass

    def remove(self):
        # Not implemented yet.
        pass
|
|
|
|
|
### Utility functions ###
|
|
|
|
|
|
def get_pip_from_venv(venv_path: Path) -> str:
    """
    Return the absolute path of the `pip` executable inside a virtual environment.

    Cross-platform: handles the Windows `Scripts` layout and the POSIX `bin`
    layout. Does not validate that the pip executable actually exists in the
    virtualenv.

    :param venv_path: Path to the virtual environment
    :type venv_path: Path
    :return: Absolute path to the pip executable
    :rtype: str
    """

    root = venv_path.expanduser().resolve()
    if OS == "Windows":
        return str(root / "Scripts" / "pip.exe")
    return str(root / "bin" / "pip")
|
|
|
|
|
def set_sys_path(venv_path: Path) -> None:
    """
    Adjust sys.path so that packages from the given venv can be imported
    in the current process, in a cross-platform fashion.

    Emulates the virtual env 'activate' script by hiding system- and
    user-wide package directories, while keeping the temporary bootstrap
    virtualenv visible (it holds packages needed at install time).
    Hiding system packages doesn't work on Windows yet.

    :param venv_path: Path to the virtual environment
    :type venv_path: Path
    """

    # Keep an entry only if it is not a *-packages directory, or if it
    # belongs to the temporary bootstrap venv we still depend on.
    kept = [
        entry
        for entry in sys.path
        if not entry.endswith("-packages") or BOOTSTRAP_VENV_PREFIX in entry
    ]
    sys.path = kept

    # Locate the venv's lib directory for this platform/interpreter...
    if OS == "Windows":
        lib = "Lib"
    else:
        lib = f"lib/python{sys.version_info.major}.{sys.version_info.minor}"

    # ...and make its site-packages importable.
    site_packages = Path(venv_path, lib, "site-packages").expanduser().resolve()
    sys.path.append(str(site_packages))
|
|
|
|
|
def get_torch_source() -> Tuple[str | None, str | None]:
    """
    Determine the extra index URL for pip to use for torch installation.
    This depends on the OS and the graphics accelerator in use.
    This is only applicable to Windows and Linux, since PyTorch does not
    offer accelerated builds for macOS.

    Prefer CUDA-enabled wheels if the user wasn't sure of their GPU, as it will fallback to CPU if possible.

    A None element means just go to PyPi.

    :return: tuple of (extra index url or None, optional modules to load or None)
    :rtype: tuple
    """

    from messages import graphical_accelerator

    # device can be one of: "cuda", "rocm", "cpu", "idk"
    device = graphical_accelerator()

    url = None
    optional_modules = "[onnx]"

    # ROCm and CPU-only wheels live on the PyTorch index, Linux only.
    if OS == "Linux" and device == "rocm":
        url = "https://download.pytorch.org/whl/rocm5.6"
    elif OS == "Linux" and device == "cpu":
        url = "https://download.pytorch.org/whl/cpu"

    # CUDA wheels apply regardless of OS.
    if device == "cuda":
        url = "https://download.pytorch.org/whl/cu121"
        optional_modules = "[xformers,onnx-cuda]"
    elif device == "cuda_and_dml":
        url = "https://download.pytorch.org/whl/cu121"
        optional_modules = "[xformers,onnx-directml]"

    # in all other cases, Torch wheels should be coming from PyPi as of Torch 1.13

    return (url, optional_modules)