Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00

Commit 1cae089889: Merge remote-tracking branch 'origin/main' into dev/diffusers

# Conflicts:
#	environments-and-requirements/requirements-base.txt
#	ldm/invoke/generator/txt2img.py
#	ldm/invoke/generator/txt2img2img.py
.github/workflows/build-container.yml (vendored): 9 changed lines
@@ -5,17 +5,12 @@ on:
push:
branches:
- 'main'
- 'development'
- 'update-dockerfile'

jobs:
docker:
strategy:
fail-fast: false
matrix:
arch:
- x86_64
- aarch64
pip-requirements:
- requirements-lin-amd.txt
- requirements-lin-cuda.txt
@@ -37,7 +32,7 @@ jobs:
with:
context: .
file: docker-build/Dockerfile
platforms: Linux/${{ matrix.arch }}
platforms: linux/amd64,linux/arm64
push: false
tags: ${{ env.dockertag }}:${{ matrix.pip-requirements }}-${{ matrix.arch }}
tags: ${{ env.dockertag }}:${{ matrix.pip-requirements }}
build-args: pip_requirements=${{ matrix.pip-requirements }}
.github/workflows/test-invoke-conda.yml (vendored): 4 changed lines
@@ -104,9 +104,9 @@ jobs:
--no-interactive --yes \
--full-precision # can't use fp16 weights without a GPU

- name: cat ~/.invokeai
- name: cat invokeai.init
id: cat-invokeai
run: cat ~/.invokeai
run: cat ${{ env.INVOKEAI_ROOT }}/invokeai.init

- name: Run the tests
id: run-tests
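For context, a sketch of the equivalent local invocation of the configuration step this workflow exercises, using the flags visible in the hunk above (the script location is the one referenced elsewhere in this commit and may differ between versions):

```bash
# Download and configure model weights non-interactively, in full precision (no GPU assumed).
python scripts/configure_invokeai.py --no-interactive --yes --full-precision
```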
.gitignore (vendored): 12 changed lines
@@ -6,6 +6,7 @@ models/ldm/stable-diffusion-v1/model.ckpt
# ignore user models config
configs/models.user.yaml
config/models.user.yml
invokeai.init

# ignore the Anaconda/Miniconda installer used while building Docker image
anaconda.sh
@@ -222,12 +223,11 @@ environment.yml
requirements.txt

# source installer files
source_installer/*zip
source_installer/invokeAI
install.bat
install.sh
update.bat
update.sh
installer/*zip
installer/install.bat
installer/install.sh
installer/update.bat
installer/update.sh

# this may be present if the user created a venv
invokeai
@@ -208,10 +208,9 @@ class InvokeAIWebServer:
FlaskUI(
app=self.app,
socketio=self.socketio,
start_server="flask-socketio",
server="flask_socketio",
width=1600,
height=1000,
idle_interval=10,
port=self.port
).run()
except KeyboardInterrupt:
@@ -245,14 +244,16 @@ class InvokeAIWebServer:

def find_frontend(self):
my_dir = os.path.dirname(__file__)
for candidate in (os.path.join(my_dir,'..','frontend','dist'), # pip install -e .
os.path.join(my_dir,'../../../../frontend','dist') # pip install .
# LS: setup.py seems to put the frontend in different places on different systems, so
# this is fragile and needs to be replaced with a better way of finding the front end.
for candidate in (os.path.join(my_dir,'..','frontend','dist'), # pip install -e .
os.path.join(my_dir,'../../../../frontend','dist'), # pip install . (Linux, Mac)
os.path.join(my_dir,'../../../frontend','dist'), # pip install . (Windows)
):
if os.path.exists(candidate):
return candidate
assert "Frontend files cannot be found. Cannot continue"


def setup_app(self):
self.result_url = "outputs/"
self.init_image_url = "outputs/init-images/"
@@ -1,32 +0,0 @@
#!/usr/bin/env bash

set -euo pipefail
IFS=$'\n\t'

echo "Be certain that you're in the 'installer' directory before continuing."
read -p "Press any key to continue, or CTRL-C to exit..."

VERSION='2.2.3'

# make the installer zip for linux and mac
rm -rf InvokeAI
mkdir -p InvokeAI
cp install.sh.in InvokeAI/install.sh
chmod a+x InvokeAI/install.sh
cp readme.txt InvokeAI

zip -r InvokeAI-binary-$VERSION-linux.zip InvokeAI
zip -r InvokeAI-binary-$VERSION-mac.zip InvokeAI

# make the installer zip for windows
rm -rf InvokeAI
mkdir -p InvokeAI
cp install.bat.in InvokeAI/install.bat
cp readme.txt InvokeAI
cp WinLongPathsEnabled.reg InvokeAI

zip -r InvokeAI-binary-$VERSION-windows.zip InvokeAI

rm -rf InvokeAI

echo "The installer zips are ready for distribution."
@@ -2,10 +2,6 @@

set -eu

# ensure we're in the correct folder in case user's CWD is somewhere else
scriptdir=$(dirname "$0")
cd "$scriptdir"

. .venv/bin/activate

# set required env var for torch on mac MPS
@@ -9,7 +9,7 @@ einops
eventlet
flask_cors
flask_socketio
flaskwebgui
flaskwebgui==1.0.3
getpass_asterisk
imageio-ffmpeg
pyreadline3
@@ -1,4 +1,4 @@
FROM ubuntu:22.10
FROM python:3.10-slim AS builder

# use bash
SHELL [ "/bin/bash", "-c" ]
@@ -7,28 +7,42 @@ SHELL [ "/bin/bash", "-c" ]
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
build-essential \
gcc \
git \
libgl1-mesa-glx \
libglib2.0-0 \
pip \
python3 \
python3-dev \
gcc=4:10.2.* \
libgl1-mesa-glx=20.3.* \
libglib2.0-0=2.66.* \
python3-dev=3.9.* \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

# set workdir and copy sources
WORKDIR /invokeai
# set workdir, PATH and copy sources
WORKDIR /usr/src/app
ENV PATH /usr/src/app/.venv/bin:$PATH
ARG PIP_REQUIREMENTS=requirements-lin-cuda.txt
COPY . ./environments-and-requirements/${PIP_REQUIREMENTS} ./

# install requirements and link outputs folder
RUN pip install \
--no-cache-dir \
-r ${PIP_REQUIREMENTS}
# install requirements
RUN python3 -m venv .venv \
&& pip install \
--no-cache-dir \
-r ${PIP_REQUIREMENTS}

FROM python:3.10-slim AS runtime

# Install necesarry packages
RUN apt-get update \
&& apt-get install -y \
--no-install-recommends \
libgl1-mesa-glx=20.3.* \
libglib2.0-0=2.66.* \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

WORKDIR /usr/src/app
COPY --from=builder /usr/src/app .

# set Environment, Entrypoint and default CMD
ENV INVOKEAI_ROOT /data
ENTRYPOINT [ "python3", "scripts/invoke.py", "--outdir=/data/outputs" ]
ENV PATH=/usr/src/app/.venv/bin:$PATH

ENTRYPOINT [ "python3", "scripts/invoke.py" ]
CMD [ "--web", "--host=0.0.0.0" ]
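For reference, a rough sketch of building this multi-stage image by hand, assuming the Dockerfile path used in the workflow above and the PIP_REQUIREMENTS build argument declared in the Dockerfile (the image tag is illustrative):

```bash
# Build from the repository root; override the requirements file for AMD, CPU, etc.
docker build . \
  --file docker-build/Dockerfile \
  --build-arg PIP_REQUIREMENTS=requirements-lin-cuda.txt \
  --tag invokeai:latest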
@@ -36,7 +36,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \

COPY . .
RUN --mount=type=cache,target=/root/.cache/pip \
cp binary_installer/py3.10-linux-x86_64-cuda-reqs.txt requirements.txt && \
cp environments-and-requirements/requirements-lin-cuda.txt requirements.txt && \
pip install -r requirements.txt &&\
pip install -e .
@@ -12,13 +12,13 @@ pip_requirements=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt}
dockerfile=${INVOKE_DOCKERFILE:-docker-build/Dockerfile}

# print the settings
echo "You are using these values:"
echo -e "Dockerfile:\t\t ${dockerfile}"
echo -e "requirements:\t\t ${pip_requirements}"
echo -e "volumename:\t\t ${volumename}"
echo -e "arch:\t\t\t ${arch}"
echo -e "platform:\t\t ${platform}"
echo -e "invokeai_tag:\t\t ${invokeai_tag}\n"
echo -e "You are using these values:\n"
echo -e "Dockerfile:\t ${dockerfile}"
echo -e "requirements:\t ${pip_requirements}"
echo -e "volumename:\t ${volumename}"
echo -e "arch:\t\t ${arch}"
echo -e "platform:\t ${platform}"
echo -e "invokeai_tag:\t ${invokeai_tag}\n"

if [[ -n "$(docker volume ls -f name="${volumename}" -q)" ]]; then
echo "Volume already exists"
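A hedged example of driving this build script with the environment variables it reads (PIP_REQUIREMENTS and INVOKE_DOCKERFILE appear in the hunk above; the chosen values are illustrative):

```bash
# Run from the repository root; both variables fall back to the defaults shown above.
PIP_REQUIREMENTS=requirements-lin-amd.txt \
INVOKE_DOCKERFILE=docker-build/Dockerfile \
./docker-build/build.sh
```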
@@ -3,6 +3,10 @@ set -e

source ./docker-build/env.sh || echo "please run from repository root" || exit 1

echo -e "You are using these values:\n"
echo -e "volumename:\t ${volumename}"
echo -e "invokeai_tag:\t ${invokeai_tag}\n"

docker run \
--interactive \
--tty \
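For reference, a minimal sketch of a full `docker run` invocation along the lines this script builds up, assuming the `${volumename}` and `${invokeai_tag}` variables printed above plus the `/data` root and `--web --host=0.0.0.0` defaults from the runtime Dockerfile (the published port is an assumption based on the web UI's default of 9090):

```bash
docker run \
  --interactive \
  --tty \
  --rm \
  --volume "${volumename}:/data" \
  --publish 9090:9090 \
  "${invokeai_tag}"
```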
@@ -82,13 +82,18 @@ Mac and Linux machines, and runs on GPU cards with as little as 4 GB or RAM.

This fork is supported across Linux, Windows and Macintosh. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver). For full installation and upgrade
instructions, please see:
[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)
AMD card (using the ROCm driver).

First time users, please see [Automated
Installer](installation/INSTALL_AUTOMATED.md) for a walkthrough of
getting InvokeAI up and running on your system. For alternative
installation and upgrade instructions, please see: [InvokeAI
Installation Overview](installation/)

Linux users who wish to make use of the PyPatchMatch inpainting
functions will need to perform a bit of extra work to enable this
module. Instructions can be found at [Installing PyPatchMatch](installation/INSTALL_PATCHMATCH.md).
module. Instructions can be found at [Installing
PyPatchMatch](installation/INSTALL_PATCHMATCH.md).

## :fontawesome-solid-computer: Hardware Requirements

@@ -100,9 +105,13 @@ You wil need one of the following:
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux only)
- :fontawesome-brands-apple: An Apple computer with an M1 chip.

We do not recommend the GTX 1650 or 1660 series video cards. They are
unable to run in half-precision mode and do not come with sufficient VRAM
to render 512x512 images.
We do **not recommend** the following video cards due to issues with
their running in half-precision mode and having insufficient VRAM to
render 512x512 images in full-precision mode:

- NVIDIA 10xx series cards such as the 1080ti
- GTX 1650 series cards
- GTX 1660 series cards

### :fontawesome-solid-memory: Memory

@@ -110,16 +119,11 @@ to render 512x512 images.

### :fontawesome-regular-hard-drive: Disk

- At least 12 GB of free disk space for the machine learning model, Python, and
- At least 18 GB of free disk space for the machine learning model, Python, and
all its dependencies.

!!! info

If you are have a Nvidia 10xx series card (e.g. the 1080ti), please run the invoke script in
full-precision mode as shown below.

Similarly, specify full-precision mode on Apple M1 hardware.

Precision is auto configured based on the device. If however you encounter errors like
`expected type Float but found Half` or `not implemented for Half` you can try starting
`invoke.py` with the `--precision=float32` flag:
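The fenced example that follows that sentence in the rendered document falls outside this hunk; a minimal sketch of the invocation it describes (paths assume you run from the repository root):

```bash
python scripts/invoke.py --precision=float32
```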
docs/installation/INSTALL_AUTOMATED.md (new file): 315 lines
@@ -0,0 +1,315 @@
---
title: InvokeAI Automated Installation
---

# InvokeAI Automated Installation

## Introduction

The automated installer is a shell script that attempts to automate every step
needed to install and run InvokeAI on a stock computer running recent versions
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
version of InvokeAI with the option to upgrade to experimental versions later.

## Walk through

1. Make sure that your system meets the
[hardware requirements](../index.md#hardware-requirements) and has the
appropriate GPU drivers installed. In particular, if you are a Linux user
with an AMD GPU installed, you may need to install the
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

!!! info "Required Space"

Installation requires roughly 18G of free disk space to load the libraries and
recommended model weights files.

2. Check that your system has an up-to-date Python installed. To do this, open
up a command-line window ("Terminal" on Linux and Macintosh, "Command" or
"Powershell" on Windows) and type `python --version`. If Python is
installed, it will print out the version number. If it is version `3.9.1` or
higher, you meet requirements.

!!! warning "If you see an older version, or get a command not found error"

Go to [Python Downloads](https://www.python.org/downloads/) and
download the appropriate installer package for your platform. We recommend
[Version 3.10.9](https://www.python.org/downloads/release/python-3109/),
which has been extensively tested with InvokeAI.

!!! warning "At this time we do not recommend Python 3.11"

=== "Windows users"

- During the Python configuration process,
please look out for a checkbox to add Python to your PATH
and select it. If the install script complains that it can't
find python, then open the Python installer again and choose
"Modify" existing installation.

- There is a slight possibility that you will encounter
DLL load errors at the very end of the installation process. This is caused
by not having up to date Visual C++ redistributable libraries. If this
happens to you, you can install the C++ libraries from this site:
https://learn.microsoft.com/en-us/cpp/windows/deploying-native-desktop-applications-visual-cpp?view=msvc-170

=== "Mac users"

- After installing Python, you may need to run the
following command from the Terminal in order to install the Web
certificates needed to download model data from https sites. If
you see lots of CERTIFICATE ERRORS during the last part of the
install, this is the problem, and you can fix it with this command:

`/Applications/Python\ 3.10/Install\ Certificates.command`

- You may need to install the Xcode command line tools. These
are a set of tools that are needed to run certain applications in a
Terminal, including InvokeAI. This package is provided directly by Apple.

- To install, open a terminal window and run `xcode-select
--install`. You will get a macOS system popup guiding you through the
install. If you already have them installed, you will instead see some
output in the Terminal advising you that the tools are already installed.

- More information can be found here:
https://www.freecodecamp.org/news/install-xcode-command-line-tools/

=== "Linux users"

- See [Installing Python in Ubuntu](#installing-python-in-ubuntu) for some
platform-specific tips.

3. The source installer is distributed in ZIP files. Go to the
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
look for a series of files named:

- [InvokeAI-installer-2.2.4-mac.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-mac.zip)
- [InvokeAI-installer-2.2.4-windows.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-windows.zip)
- [InvokeAI-installer-2.2.4-linux.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-linux.zip)

Download the one that is appropriate for your operating system.

4. Unpack the zip file into a convenient directory. This will create a new
directory named "InvokeAI-Installer". This example shows how this would look
using the `unzip` command-line tool, but you may use any graphical or
command-line Zip extractor:

```cmd
C:\Documents\Linco> unzip InvokeAI-installer-2.2.4-windows.zip
Archive: C: \Linco\Downloads\InvokeAI-installer-2.2.4-windows.zip
creating: InvokeAI-Installer\
inflating: InvokeAI-Installer\install.bat
inflating: InvokeAI-Installer\readme.txt
...
```

After successful installation, you can delete the `InvokeAI-Installer`
directory.

5. **Windows only** Please double-click on the file WinLongPathsEnabled.reg and
accept the dialog box that asks you if you wish to modify your registry.
This activates long filename support on your system and will prevent
mysterious errors during installation.

6. If you are using a desktop GUI, double-click the installer file. It will be
named `install.bat` on Windows systems and `install.sh` on Linux and
Macintosh systems.

On Windows systems you will probably get an "Untrusted Publisher" warning.
Click on "More Info" and select "Run Anyway." You trust us, right?

7. Alternatively, from the command line, run the shell script or .bat file:

```cmd
C:\Documents\Linco> cd InvokeAI-Installer
C:\Documents\Linco\invokeAI> install.bat
```

8. The script will ask you to choose where to install InvokeAI. Select a
directory with at least 18G of free space for a full install. InvokeAI and
all its support files will be installed into a new directory named
`invokeai` located at the location you specify.

- The default is to install the `invokeai` directory in your home directory,
usually `C:\Users\YourName\invokeai` on Windows systems,
`/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai`
on Macintoshes, where "YourName" is your login name.

- The script uses tab autocompletion to suggest directory path completions.
Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
to suggest completions.

9. Sit back and let the install script work. It will install the third-party
libraries needed by InvokeAI, then download the current InvokeAI release and
install it.

Be aware that some of the library download and install steps take a long
time. In particular, the `pytorch` package is quite large and often appears
to get "stuck" at 99.9%. Have patience and the installation step will
eventually resume. However, there are occasions when the library install
does legitimately get stuck. If you have been waiting for more than ten
minutes and nothing is happening, you can interrupt the script with ^C. You
may restart it and it will pick up where it left off.

10. After installation completes, the installer will launch a script called
`configure_invokeai.py`, which will guide you through the first-time process
of selecting one or more Stable Diffusion model weights files, downloading
and configuring them. We provide a list of popular models that InvokeAI
performs well with. However, you can add more weight files later on using
the command-line client or the Web UI. See
[Installing Models](INSTALLING_MODELS.md) for details.

Note that the main Stable Diffusion weights file is protected by a license
agreement that you must agree to in order to use. The script will list the
steps you need to take to create an account on the official site that hosts
the weights files, accept the agreement, and provide an access token that
allows InvokeAI to legally download and install the weights files.

If you have already downloaded the weights file(s) for another Stable
Diffusion distribution, you may skip this step (by selecting "skip" when
prompted) and configure InvokeAI to use the previously-downloaded files. The
process for this is described in [Installing Models](INSTALLING_MODELS.md).

11. The script will now exit and you'll be ready to generate some images. Look
for the directory `invokeai` installed in the location you chose at the
beginning of the install session. Look for a shell script named `invoke.sh`
(Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking
it or typing its name at the command-line:

```cmd
C:\Documents\Linco> cd invokeai
C:\Documents\Linco\invokeAI> invoke.bat
```

- The `invoke.bat` (`invoke.sh`) script will give you the choice of starting
(1) the command-line interface, or (2) the web GUI. If you start the
latter, you can load the user interface by pointing your browser at
http://localhost:9090.

- The script also offers you a third option labeled "open the developer
console". If you choose this option, you will be dropped into a
command-line interface in which you can run python commands directly,
access developer tools, and launch InvokeAI with customized options.

12. You can launch InvokeAI with several different command-line arguments that
customize its behavior. For example, you can change the location of the
image output directory, or select your favorite sampler (see the sketch
after this walkthrough). See the
[Command-Line Interface](../features/CLI.md) for a full list of the options.

- To set defaults that will take effect every time you launch InvokeAI,
use a text editor (e.g. Notepad) to edit the file
`invokeai\invokeai.init`. It contains a variety of examples that you can
follow to add and modify launch options.

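As a hedged illustration of the launch options mentioned in step 12 (the output directory is made up; `--outdir` and `--precision` are flags that appear elsewhere in this commit):

```bash
# From the developer console (option 3 of the launcher), with the virtual environment active:
python scripts/invoke.py --outdir=/home/YourName/invokeai/outputs --precision=float32
```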
!!! warning "The `invokeai` directory contains the `invoke` application, its
|
||||
configuration files, the model weight files, and outputs of image generation.
|
||||
Once InvokeAI is installed, do not move or remove this directory."
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### _Package dependency conflicts_
|
||||
|
||||
If you have previously installed InvokeAI or another Stable Diffusion package,
|
||||
the installer may occasionally pick up outdated libraries and either the
|
||||
installer or `invoke` will fail with complaints about library conflicts. You can
|
||||
address this by entering the `invokeai` directory and running `update.sh`, which
|
||||
will bring InvokeAI up to date with the latest libraries.
|
||||
|
||||
### ldm from pypi
|
||||
|
||||
!!! warning
|
||||
|
||||
Some users have tried to correct dependency problems by installing
|
||||
the `ldm` package from PyPi.org. Unfortunately this is an unrelated package that
|
||||
has nothing to do with the 'latent diffusion model' used by InvokeAI. Installing
|
||||
ldm will make matters worse. If you've installed ldm, uninstall it with
|
||||
`pip uninstall ldm`.
|
||||
|
||||
### Corrupted configuration file
|
||||
|
||||
Everything seems to install ok, but `invoke` complains of a corrupted
|
||||
configuration file and goes back into the configuration process (asking you to
|
||||
download models, etc), but this doesn't fix the problem.
|
||||
|
||||
This issue is often caused by a misconfigured configuration directive in the
|
||||
`invokeai\invokeai.init` initialization file that contains startup settings. The
|
||||
easiest way to fix the problem is to move the file out of the way and re-run
|
||||
`configure_invokeai.py`. Enter the developer's console (option 3 of the launcher
|
||||
script) and run this command:
|
||||
|
||||
```cmd
|
||||
configure_invokeai.py --root=.
|
||||
```
|
||||
|
||||
Note the dot (.) after `--root`. It is part of the command.
|
||||
|
||||
_If none of these maneuvers fixes the problem_ then please report the problem to
|
||||
the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
|
||||
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
|
||||
assistance.
|
||||
|
||||
### other problems
|
||||
|
||||
If you run into problems during or after installation, the InvokeAI team is
|
||||
available to help you. Either create an
|
||||
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
|
||||
make a request for help on the "bugs-and-support" channel of our
|
||||
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
|
||||
organization, but typically somebody will be available to help you within 24
|
||||
hours, and often much sooner.
|
||||
|
||||
## Updating to newer versions
|
||||
|
||||
This distribution is changing rapidly, and we add new features on a daily basis.
|
||||
To update to the latest released version (recommended), run the `update.sh`
|
||||
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
|
||||
release and re-run the `configure_invokeai` script to download any updated
|
||||
models files that may be needed. You can also use this to add additional models
|
||||
that you did not select at installation time.
|
||||
|
||||
You can now close the developer console and run `invoke` as before. If you get
|
||||
complaints about missing models, then you may need to do the additional step of
|
||||
running `configure_invokeai.py`. This happens relatively infrequently. To do
|
||||
this, simply open up the developer's console again and type
|
||||
`python scripts/configure_invokeai.py`.
|
||||
|
||||
You may also use the `update` script to install any selected version of
|
||||
InvokeAI. From https://github.com/invoke-ai/InvokeAI, navigate to the zip file
|
||||
link of the version you wish to install. You can find the zip links by going to
|
||||
the one of the release pages and looking for the **Assets** section at the
|
||||
bottom. Alternatively, you can browse "branches" and "tags" at the top of the
|
||||
big code directory on the InvokeAI welcome page. When you find the version you
|
||||
want to install, go to the green "<> Code" button at the top, and copy the
|
||||
"Download ZIP" link.
|
||||
|
||||
Now run `update.sh` (or `update.bat`) with the URL of the desired InvokeAI
|
||||
version as its argument. For example, this will install the old 2.2.0 release.
|
||||
|
||||
```cmd
|
||||
update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.0.zip
|
||||
```
|
||||
|
||||
## Installing Python in Ubuntu
|
||||
|
||||
For reasons that are not entirely clear, installing the correct version of
|
||||
Python can be a bit of a challenge on Ubuntu, Linux Mint, and other
|
||||
Ubuntu-derived distributions.
|
||||
|
||||
In particular, Ubuntu version 20.04 LTS comes with an old version of Python,
|
||||
does not come with the PIP package manager installed, and to make matters worse,
|
||||
the `python` command points to Python2, not Python3.
|
||||
|
||||
Here is the quick recipe for bringing your system up to date:
|
||||
|
||||
```
|
||||
sudo apt update
|
||||
sudo apt install python3.9
|
||||
sudo apt install python3-pip
|
||||
cd /usr/bin
|
||||
sudo ln -sf python3.9 python3
|
||||
sudo ln -sf python3 python
|
||||
```
|
||||
|
||||
You can still access older versions of Python by calling `python2`, `python3.8`,
|
||||
etc.
|
@@ -8,7 +8,7 @@ title: Manual Installation

!!! warning "This is for advanced Users"

who are already expirienced with using conda or pip
who are already experienced with using conda or pip

## Introduction

@@ -5,39 +5,20 @@ title: Overview
We offer several ways to install InvokeAI, each one suited to your
experience and preferences.

1. [InvokeAI source code installer](INSTALL_SOURCE.md)
This is a script that will install Python, the Anaconda ("conda")
package manager, all of InvokeAI's its essential third party
libraries and InvokeAI itself. It includes access to a "developer
console" which will help us debug problems with you and give you
to access experimental features.
1. [Automated Installer](INSTALL_AUTOMATED.md)

When a new InvokeAI feature is available, even between releases,
you will be able to upgrade and try it out by running an `update`
script. This method is recommended for individuals who wish to
stay on the cutting edge of InvokeAI development and are not
afraid of occasional breakage.

To get started go to the bottom of the
[Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)
and download the .zip file for your platform. Unzip the file.
If you are on a Windows system, double-click on the `install.bat`
script. On a Mac or Linux system, navigate to the file `install.sh`
from within the terminal application, and run the script.

Sit back and watch the script run.

**Important Caveats**
- This script is a bit cranky and occasionally hangs or times out,
forcing you to cancel and restart the script (it will pick up where
it left off).
This is a script that will install all of InvokeAI's essential
third party libraries and InvokeAI itself. It includes access to a
"developer console" which will help us debug problems with you and
give you to access experimental features.

2. [Manual Installation](INSTALL_MANUAL.md)

In this method you will manually run the commands needed to install
InvokeAI and its dependencies. We offer two recipes: one suited to
those who prefer the `conda` tool, and one suited to those who prefer
`pip` and Python virtual environments.
`pip` and Python virtual environments. In our hands the pip install
is faster and more reliable, but your mileage may vary (a pip sketch
appears after this overview).

This method is recommended for users who have previously used `conda`
or `pip` in the past, developers, and anyone who wishes to remain on
@@ -51,9 +32,3 @@ experience and preferences.
individuals with experience with Docker containers and understand
the pluses and minuses of a container-based install.

4. [Jupyter Notebooks Installation](INSTALL_JUPYTER.md)

This method is suitable for running InvokeAI on a Google Colab
account. It is recommended for individuals who have previously
worked on the Colab and are comfortable with the Jupyter notebook
environment.
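As a rough sketch of the pip-plus-virtualenv recipe referred to in option 2 above (assuming a Linux machine with a CUDA card and the requirements files shipped in this repository; the manual installation guide remains the authoritative reference):

```bash
# From a checkout of the repository:
python3 -m venv .venv
source .venv/bin/activate
pip install --upgrade pip
pip install -r environments-and-requirements/requirements-lin-cuda.txt
```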
@@ -13,7 +13,6 @@ dependencies:
- cudatoolkit=11.6
- pip:
- albumentations==0.4.3
- basicsr==1.4.1
- dependency_injector==4.40.0
- diffusers~=0.10
- einops==0.3.0
@@ -8,8 +8,9 @@ facexlib
flask==2.1.3
flask_cors==3.0.10
flask_socketio==5.3.0
flaskwebgui==0.3.7
flaskwebgui==1.0.3
getpass_asterisk
gfpgan==1.3.8
huggingface-hub
imageio
imageio-ffmpeg
@@ -17,6 +18,7 @@ kornia
numpy
omegaconf
opencv-python
picklescan
pillow
pip>=22
pudb
@@ -32,10 +34,7 @@ test-tube>=0.7.5
torch-fidelity
torchmetrics
transformers~=4.25
picklescan
# git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.1#egg=gfpgan ; platform_system == 'Windows'
git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan ; platform_system != 'Windows'
git+https://github.com/openai/CLIP.git@main#egg=clip
git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip#egg=k-diffusion
https://github.com/invoke-ai/PyPatchMatch/archive/refs/tags/0.1.4.zip#egg=pypatchmatch
https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip#egg=clip
https://github.com/invoke-ai/clipseg/archive/relaxed-python-requirement.zip#egg=clipseg

@@ -1,2 +1,5 @@
--extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
-r environments-and-requirements/requirements-base.txt
torch
torchvision
-e .

@@ -1,8 +1,6 @@
-r environments-and-requirements/requirements-base.txt
# Get hardware-appropriate torch/torchvision
--extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
gfpgan
basicsr
torch==1.12.1
torchvision==0.13.1
-e .
frontend/dist/assets/index-legacy-8e84772c.js (vendored, new file): 48 lines; file diff suppressed because one or more lines are too long
frontend/dist/assets/index.637f12bd.js (vendored): 623 changed lines; file diff suppressed because one or more lines are too long
frontend/dist/assets/index.81f1c71c.css (vendored, new file): 1 line; file diff suppressed because one or more lines are too long
frontend/dist/assets/index.c609c0c8.css (vendored): 1 changed line; file diff suppressed because one or more lines are too long
frontend/dist/assets/index.d864890e.js (vendored, new file): 623 lines; file diff suppressed because one or more lines are too long
frontend/dist/assets/polyfills-legacy-dde3a68a.js (vendored, new file): 4 lines; file diff suppressed because one or more lines are too long
frontend/dist/assets/polyfills.1ff60148.js (vendored, new file): 1 line; file diff suppressed because one or more lines are too long
frontend/dist/index.html (vendored): 11 changed lines
@@ -2,17 +2,24 @@
<html lang="en">

<head>
<script type="module" crossorigin src="./assets/polyfills.1ff60148.js"></script>

<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon.0d253ced.ico" />
<script type="module" crossorigin src="./assets/index.637f12bd.js"></script>
<link rel="stylesheet" href="./assets/index.c609c0c8.css">
<script type="module" crossorigin src="./assets/index.d864890e.js"></script>
<link rel="stylesheet" href="./assets/index.81f1c71c.css">
<script type="module">try{import.meta.url;import("_").catch(()=>1);}catch(e){}window.__vite_is_modern_browser=true;</script>
<script type="module">!function(){if(window.__vite_is_modern_browser)return;console.warn("vite: loading legacy build because dynamic import or import.meta.url is unsupported, syntax error above should be ignored");var e=document.getElementById("vite-legacy-polyfill"),n=document.createElement("script");n.src=e.src,n.onload=function(){System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))},document.body.appendChild(n)}();</script>
</head>

<body>
<div id="root"></div>

<script nomodule>!function(){var e=document,t=e.createElement("script");if(!("noModule"in t)&&"onbeforeload"in t){var n=!1;e.addEventListener("beforeload",(function(e){if(e.target===t)n=!0;else if(!e.target.hasAttribute("nomodule")||!n)return;e.preventDefault()}),!0),t.type="module",t.src=".",e.head.appendChild(t),t.remove()}}();</script>
<script nomodule crossorigin id="vite-legacy-polyfill" src="./assets/polyfills-legacy-dde3a68a.js"></script>
<script nomodule crossorigin id="vite-legacy-entry" data-src="./assets/index-legacy-8e84772c.js">System.import(document.getElementById('vite-legacy-entry').getAttribute('data-src'))</script>
</body>

</html>
@@ -53,6 +53,7 @@
"@types/react-transition-group": "^4.4.5",
"@typescript-eslint/eslint-plugin": "^5.36.2",
"@typescript-eslint/parser": "^5.36.2",
"@vitejs/plugin-legacy": "^3.0.1",
"@vitejs/plugin-react": "^2.0.1",
"eslint": "^8.23.0",
"eslint-plugin-prettier": "^4.2.1",
@@ -60,6 +61,7 @@
"patch-package": "^6.5.0",
"postinstall-postinstall": "^2.1.0",
"sass": "^1.55.0",
"terser": "^5.16.1",
"tsc-watch": "^5.0.3",
"typescript": "^4.6.4",
"vite": "^3.0.7",
frontend/src/styles/Mixins/_Responsive.scss (new file): 159 lines
@@ -0,0 +1,159 @@
@media (max-width: 600px) {
#root{
.app-content{
padding: 5px;
.site-header {
position: fixed;
display: flex;
height: 100px;
z-index: 1;
.site-header-left-side{
position: absolute;
display: flex;
min-width: 145px;
float: left;
padding-left: 0;
}
.site-header-right-side{
display: grid;
grid-template-columns: 1fr 1fr 1fr 1fr 1fr 1fr;
grid-template-rows: 25px 25px 25px;
grid-template-areas: 'logoSpace logoSpace logoSpace sampler sampler sampler'
'status status status status status status'
'btn1 btn2 btn3 btn4 btn5 btn6';
row-gap: 15px;
.chakra-popover__popper{
grid-area: logoSpace;
}
> :nth-child(1).chakra-text{
grid-area: status;
width: 100%;
display: flex;
justify-content: center;
}
> :nth-child(2){
grid-area: sampler;
display: flex;
justify-content: center;
align-items: center;
select{
width: 185px;
margin-top: 10px;
}
.chakra-select__icon-wrapper{
right:10px;
svg{
margin-top: 10px;
}
}
}
> :nth-child(3){
grid-area: btn1;
}
> :nth-child(4){
grid-area: btn2;
}
> :nth-child(6){
grid-area: btn3;
}
> :nth-child(7){
grid-area: btn4;
}
> :nth-child(8){
grid-area: btn5;
}
> :nth-child(9){
grid-area: btn6;
}

}
}
.app-tabs{
position: fixed;
display: flex;
flex-direction: column;
row-gap: 15px;
max-width: 100%;
overflow: hidden;
margin-top: 120px;
.app-tabs-list{
display: flex;
justify-content: space-between;
}
.app-tabs-panels{
overflow: hidden;
overflow-y: scroll;
.workarea-main{
display: grid;
grid-template-areas: 'workarea'
'options'
'gallery';
row-gap: 15px;
.options-panel-wrapper{
grid-area: options;
width: 100%;
max-width: 100%;
height: inherit;
overflow: inherit;
padding: 0 10px;
.main-options-row{
max-width: 100%;
}
.advanced-settings-item{
max-width: 100%;
}
}
.workarea-children-wrapper{
grid-area: workarea;
.workarea-split-view{
display: flex;
flex-direction: column;
}
.current-image-options{
column-gap: 3px;
}
.text-to-image-area{
padding: 0;
}
.current-image-preview {
height: 430px;
}

//image 2 image
.image-upload-button {
row-gap: 10px;
padding: 5px;
svg {
width: 2rem;
height: 2rem;
margin-top: 10px;
}
}

//Cavas Painting
.inpainting-settings{
display: flex;
flex-wrap: wrap;
row-gap: 10px;
}
.inpainting-canvas-area{
.konvajs-content{
height: 400px !important;
}
}
}
.image-gallery-wrapper{
grid-area: gallery;
min-height: 400px;
.image-gallery-popup{
width: 100% !important;
max-width: 100% !important;
}
}
}
}
}
}
}
}
@@ -1,3 +1,4 @@
@forward './Shared';
@forward './Buttons';
@forward './Variables';
@forward './Responsive';
@@ -2,12 +2,20 @@ import { defineConfig } from 'vite';
import react from '@vitejs/plugin-react';
import eslint from 'vite-plugin-eslint';
import tsconfigPaths from 'vite-tsconfig-paths';
import legacy from '@vitejs/plugin-legacy';

// https://vitejs.dev/config/
export default defineConfig(({ mode }) => {
const common = {
base: '',
plugins: [react(), eslint(), tsconfigPaths()],
plugins: [
react(),
eslint(),
tsconfigPaths(),
legacy({
modernPolyfills: ['es.array.find-last'],
}),
],
server: {
// Proxy HTTP requests to the flask server
proxy: {
@@ -35,7 +43,11 @@ export default defineConfig(({ mode }) => {
},
},
build: {
target: 'esnext',
/**
* We need to polyfill for Array.prototype.findLast(); the polyfill plugin above
* overrides any target specified here.
*/
// target: 'esnext',
chunkSizeWarningLimit: 1500, // we don't really care about chunk size
},
};
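A hedged sketch of regenerating the committed `frontend/dist` bundle with the legacy plugin enabled (the `build` script name is an assumption based on the usual Vite setup; the repository's `package.json` is the authoritative source):

```bash
cd frontend
yarn install
yarn build
```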
@ -213,6 +213,11 @@
|
||||
dependencies:
|
||||
regenerator-runtime "^0.13.10"
|
||||
|
||||
"@babel/standalone@^7.20.6":
|
||||
version "7.20.6"
|
||||
resolved "https://registry.yarnpkg.com/@babel/standalone/-/standalone-7.20.6.tgz#7deb7ad244176414c3cbde020aad0607afdbe2fe"
|
||||
integrity sha512-u5at/CbBLETf7kx2LOY4XdhseD79Y099WZKAOMXeT8qvd9OSR515my2UNBBLY4qIht/Qi9KySeQHQwQwxJN4Sw==
|
||||
|
||||
"@babel/template@^7.18.10":
|
||||
version "7.18.10"
|
||||
resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.18.10.tgz#6f9134835970d1dbf0835c0d100c9f38de0c5e71"
|
||||
@ -1204,7 +1209,7 @@
|
||||
"@jridgewell/set-array" "^1.0.0"
|
||||
"@jridgewell/sourcemap-codec" "^1.4.10"
|
||||
|
||||
"@jridgewell/gen-mapping@^0.3.2":
|
||||
"@jridgewell/gen-mapping@^0.3.0", "@jridgewell/gen-mapping@^0.3.2":
|
||||
version "0.3.2"
|
||||
resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz#c1aedc61e853f2bb9f5dfe6d4442d3b565b253b9"
|
||||
integrity sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==
|
||||
@ -1223,7 +1228,15 @@
|
||||
resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.1.2.tgz#7c6cf998d6d20b914c0a55a91ae928ff25965e72"
|
||||
integrity sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==
|
||||
|
||||
"@jridgewell/sourcemap-codec@1.4.14", "@jridgewell/sourcemap-codec@^1.4.10":
|
||||
"@jridgewell/source-map@^0.3.2":
|
||||
version "0.3.2"
|
||||
resolved "https://registry.yarnpkg.com/@jridgewell/source-map/-/source-map-0.3.2.tgz#f45351aaed4527a298512ec72f81040c998580fb"
|
||||
integrity sha512-m7O9o2uR8k2ObDysZYzdfhb08VuEml5oWGiosa1VdaPZ/A6QyPkAJuwN0Q1lhULOf6B7MtQmHENS743hWtCrgw==
|
||||
dependencies:
|
||||
"@jridgewell/gen-mapping" "^0.3.0"
|
||||
"@jridgewell/trace-mapping" "^0.3.9"
|
||||
|
||||
"@jridgewell/sourcemap-codec@1.4.14", "@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.13":
|
||||
version "1.4.14"
|
||||
resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz#add4c98d341472a289190b424efbdb096991bb24"
|
||||
integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==
|
||||
@ -1838,6 +1851,17 @@
|
||||
"@typescript-eslint/types" "5.44.0"
|
||||
eslint-visitor-keys "^3.3.0"
|
||||
|
||||
"@vitejs/plugin-legacy@^3.0.1":
|
||||
version "3.0.1"
|
||||
resolved "https://registry.yarnpkg.com/@vitejs/plugin-legacy/-/plugin-legacy-3.0.1.tgz#bccc0eaf15a64e1854313acebec879854e413deb"
|
||||
integrity sha512-XCtEjxoR3rmy000ujYRBp5kggWqzHz9+F20/yIMUWOzbvu0+KW1e14Fvb8h7SpNn+bfjGW1RiAs1Vrgb7Js+iQ==
|
||||
dependencies:
|
||||
"@babel/standalone" "^7.20.6"
|
||||
core-js "^3.26.1"
|
||||
magic-string "^0.27.0"
|
||||
regenerator-runtime "^0.13.11"
|
||||
systemjs "^6.13.0"
|
||||
|
||||
"@vitejs/plugin-react@^2.0.1":
|
||||
version "2.2.0"
|
||||
resolved "https://registry.yarnpkg.com/@vitejs/plugin-react/-/plugin-react-2.2.0.tgz#1b9f63b8b6bc3f56258d20cd19b33f5cc761ce6e"
|
||||
@ -1879,7 +1903,7 @@ acorn-jsx@^5.3.2:
|
||||
resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.3.2.tgz#7ed5bb55908b3b2f1bc55c6af1653bada7f07937"
|
||||
integrity sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==
|
||||
|
||||
acorn@^8.8.0:
|
||||
acorn@^8.5.0, acorn@^8.8.0:
|
||||
version "8.8.1"
|
||||
resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.8.1.tgz#0a3f9cbecc4ec3bea6f0a80b66ae8dd2da250b73"
|
||||
integrity sha512-7zFpHzhnqYKrkYdUjF1HI1bzd0VygEGX8lFk4k5zVMqHEoES+P+7TKI+EvLO9WVMJ8eekdO0aDEK044xTXwPPA==
|
||||
@ -2002,6 +2026,11 @@ browserslist@^4.21.3:
|
||||
node-releases "^2.0.6"
|
||||
update-browserslist-db "^1.0.9"
|
||||
|
||||
buffer-from@^1.0.0:
|
||||
version "1.1.2"
|
||||
resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5"
|
||||
integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==
|
||||
|
||||
callsites@^3.0.0:
|
||||
version "3.1.0"
|
||||
resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73"
|
||||
@ -2073,6 +2102,11 @@ color-name@~1.1.4:
|
||||
resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2"
|
||||
integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
|
||||
|
||||
commander@^2.20.0:
|
||||
version "2.20.3"
|
||||
resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33"
|
||||
integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==
|
||||
|
||||
commander@^4.0.0:
|
||||
version "4.1.1"
|
||||
resolved "https://registry.yarnpkg.com/commander/-/commander-4.1.1.tgz#9fd602bd936294e9e9ef46a3f4d6964044b18068"
|
||||
@ -2105,6 +2139,11 @@ copy-to-clipboard@3.3.1:
|
||||
dependencies:
|
||||
toggle-selection "^1.0.6"
|
||||
|
||||
core-js@^3.26.1:
|
||||
version "3.26.1"
|
||||
resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.26.1.tgz#7a9816dabd9ee846c1c0fe0e8fcad68f3709134e"
|
||||
integrity sha512-21491RRQVzUn0GGM9Z1Jrpr6PNPxPi+Za8OM9q4tksTSnlbXXGKK1nXNg/QvwFYettXvSX6zWKCtHHfjN4puyA==
|
||||
|
||||
cors@~2.8.5:
|
||||
version "2.8.5"
|
||||
resolved "https://registry.yarnpkg.com/cors/-/cors-2.8.5.tgz#eac11da51592dd86b9f06f6e7ac293b3df875d29"
|
||||
@ -3052,6 +3091,13 @@ magic-string@^0.26.7:
|
||||
dependencies:
|
||||
sourcemap-codec "^1.4.8"
|
||||
|
||||
magic-string@^0.27.0:
|
||||
version "0.27.0"
|
||||
resolved "https://registry.yarnpkg.com/magic-string/-/magic-string-0.27.0.tgz#e4a3413b4bab6d98d2becffd48b4a257effdbbf3"
|
||||
integrity sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA==
|
||||
dependencies:
|
||||
"@jridgewell/sourcemap-codec" "^1.4.13"
|
||||
|
||||
map-stream@~0.1.0:
|
||||
version "0.1.0"
|
||||
resolved "https://registry.yarnpkg.com/map-stream/-/map-stream-0.1.0.tgz#e56aa94c4c8055a16404a0674b78f215f7c8e194"
|
||||
@ -3555,7 +3601,7 @@ redux@^4.2.0:
|
||||
dependencies:
|
||||
"@babel/runtime" "^7.9.2"
|
||||
|
||||
regenerator-runtime@^0.13.10:
|
||||
regenerator-runtime@^0.13.10, regenerator-runtime@^0.13.11:
|
||||
version "0.13.11"
|
||||
resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz#f6dca3e7ceec20590d07ada785636a90cdca17f9"
|
||||
integrity sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==
|
||||
@ -3724,11 +3770,24 @@ socket.io@^4.5.2:
|
||||
resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.0.2.tgz#adbc361d9c62df380125e7f161f71c826f1e490c"
|
||||
integrity sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==
|
||||
|
||||
source-map-support@~0.5.20:
|
||||
version "0.5.21"
|
||||
resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.21.tgz#04fe7c7f9e1ed2d662233c28cb2b35b9f63f6e4f"
|
||||
integrity sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==
|
||||
dependencies:
|
||||
buffer-from "^1.0.0"
|
||||
source-map "^0.6.0"
|
||||
|
||||
source-map@^0.5.7:
|
||||
version "0.5.7"
|
||||
resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc"
|
||||
integrity sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==
|
||||
|
||||
source-map@^0.6.0:
|
||||
version "0.6.1"
|
||||
resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263"
|
||||
integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==
|
||||
|
||||
sourcemap-codec@^1.4.8:
|
||||
version "1.4.8"
|
||||
resolved "https://registry.yarnpkg.com/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz#ea804bd94857402e6992d05a38ef1ae35a9ab4c4"
|
||||
@ -3814,6 +3873,21 @@ supports-preserve-symlinks-flag@^1.0.0:
|
||||
resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09"
|
||||
integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==
|
||||
|
||||
systemjs@^6.13.0:
|
||||
version "6.13.0"
|
||||
resolved "https://registry.yarnpkg.com/systemjs/-/systemjs-6.13.0.tgz#7b28e74b44352e1650e8652499f42de724c3fc7f"
|
||||
integrity sha512-P3cgh2bpaPvAO2NE3uRp/n6hmk4xPX4DQf+UzTlCAycssKdqhp6hjw+ENWe+aUS7TogKRFtptMosTSFeC6R55g==
|
||||
|
||||
terser@^5.16.1:
|
||||
version "5.16.1"
|
||||
resolved "https://registry.yarnpkg.com/terser/-/terser-5.16.1.tgz#5af3bc3d0f24241c7fb2024199d5c461a1075880"
|
||||
integrity sha512-xvQfyfA1ayT0qdK47zskQgRZeWLoOQ8JQ6mIgRGVNwZKdQMU+5FkCBjmv4QjcrTzyZquRw2FVtlJSRUmMKQslw==
|
||||
dependencies:
|
||||
"@jridgewell/source-map" "^0.3.2"
|
||||
acorn "^8.5.0"
|
||||
commander "^2.20.0"
|
||||
source-map-support "~0.5.20"
|
||||
|
||||
text-table@^0.2.0:
|
||||
version "0.2.0"
|
||||
resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4"
|
||||
|
installer/create_installer.sh (new executable file): 48 lines
@@ -0,0 +1,48 @@
#!/bin/bash

cd "$(dirname "$0")"

VERSION=$(grep ^VERSION ../setup.py | awk '{ print $3 }' | sed "s/'//g" )

echo "Be certain that you're in the 'installer' directory before continuing."
read -p "Press any key to continue, or CTRL-C to exit..."

echo Building installer zip fles for InvokeAI v$VERSION

# get rid of any old ones
rm *.zip

rm -rf InvokeAI-Installer
mkdir InvokeAI-Installer

cp -pr ../environments-and-requirements templates readme.txt InvokeAI-Installer/
mkdir InvokeAI-Installer/templates/rootdir

cp -pr ../configs InvokeAI-Installer/templates/rootdir/

mkdir InvokeAI-Installer/templates/rootdir/{outputs,embeddings,models}

cp install.sh.in InvokeAI-Installer/install.sh
chmod a+rx InvokeAI-Installer/install.sh

zip -r InvokeAI-installer-$VERSION-linux.zip InvokeAI-Installer
zip -r InvokeAI-installer-$VERSION-mac.zip InvokeAI-Installer

# now do the windows installer
rm InvokeAI-Installer/install.sh
cp install.bat.in InvokeAI-Installer/install.bat
cp WinLongPathsEnabled.reg InvokeAI-Installer/

# this gets rid of the "-e ." at the end of the windows requirements file
# because it is easier to do it now than in the .bat install script
egrep -v '^-e .' InvokeAI-Installer/environments-and-requirements/requirements-win-colab-cuda.txt >requirements.txt
mv requirements.txt InvokeAI-Installer/environments-and-requirements/requirements-win-colab-cuda.txt
zip -r InvokeAI-installer-$VERSION-windows.zip InvokeAI-Installer

# clean up
rm -rf InvokeAI-Installer


exit 0

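Illustrative usage of the new packaging script above (run from a checkout of the repository; the version string is read from setup.py as shown):

```bash
cd installer
./create_installer.sh
# produces InvokeAI-installer-<version>-{linux,mac,windows}.zip
```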
installer/install.bat.in (new file): 215 lines
@@ -0,0 +1,215 @@
@echo off
setlocal EnableExtensions EnableDelayedExpansion

@rem This script requires the user to install Python 3.9 or higher. All other
@rem requirements are downloaded as needed.

@rem change to the script's directory
PUSHD "%~dp0"

set "no_cache_dir=--no-cache-dir"
if "%1" == "use-cache" (
set "no_cache_dir="
)

@rem Config
@rem this should be changed to the tagged release!
set INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/main.zip
@rem set INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip
set INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
set TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting
set PYTHON_URL=https://www.python.org/downloads/windows/
set MINIMUM_PYTHON_VERSION=3.9.0
set PYTHON_URL=https://www.python.org/downloads/release/python-3109/


set err_msg=An error has occurred and the script could not continue.

@rem --------------------------- Intro -------------------------------
echo This script will install InvokeAI and its dependencies. Before you start,
echo please make sure to do the following:
echo 1. Install python 3.9 or higher.
echo 2. Double-click on the file WinLongPathsEnabled.reg in order to
echo enable long path support on your system.
echo 3. Some users have found they need to install the Visual C++ core
echo libraries or else they experience DLL loading problems at the end of the install.
echo Visual C++ is very likely already installed on your system, but if you get DLL
echo issues, please download and install the libraries by going to:
echo https://learn.microsoft.com/en-US/cpp/windows/latest-supported-vc-redist?view=msvc-170
echo.
echo See %INSTRUCTIONS% for more details.
echo.
pause

@rem ---------------------------- check Python version ---------------
echo ***** Checking and Updating Python *****

call python --version >.tmp1 2>.tmp2
if %errorlevel% == 1 (
set err_msg=Please install Python 3.9 or higher. See %INSTRUCTIONS% for details.
goto err_exit
)

for /f "tokens=2" %%i in (.tmp1) do set python_version=%%i
if "%python_version%" == "" (
set err_msg=No python was detected on your system. Please install Python version %MINIMUM_PYTHON_VERSION% or higher. We recommend Python 3.10.9 from %PYTHON_URL%
goto err_exit
)

call :compareVersions %MINIMUM_PYTHON_VERSION% %python_version%
if %errorlevel% == 1 (
set err_msg=Your version of Python is too low. You need at least %MINIMUM_PYTHON_VERSION% but you have %python_version%. We recommend Python 3.10.9 from %PYTHON_URL%
goto err_exit
)

@rem Cleanup
del /q .tmp1 .tmp2

echo Updating PIP...
call python -m pip install --no-warn-script-location -q --upgrade pip

@rem --------------------- Get the requirements file ------------
echo.
echo Setting up requirements file for your system.
copy /y environments-and-requirements\requirements-win-colab-cuda.txt .\requirements.txt

@rem --------------------- Get the root directory for installation ------------
set rootdir=""
set response=""
set selection=""
:pick_rootdir
if %rootdir% neq "" goto :done
set /p selection=Select the path to install InvokeAI's directory into [%UserProfile%]:
if %selection% == "" set selection=%UserProfile%
set dest=%selection%\invokeai
if exist %dest% (
set response=y
set /p response=The directory %dest% exists. Do you wish to resume install from a previous attempt? [Y/n]:
if !response! == "" set response=y
if /I !response! == y (set rootdir=%dest%) else (goto :pick_rootdir)
) else (
set rootdir=!dest!
)
set response=y
set /p response="You have chosen to install InvokeAI into %rootdir%. OK? [Y/n]: "
if !response! == "" set response=y
if /I !response! neq y set rootdir=""
goto :pick_rootdir
:done

@rem ---------------------- Initialize the runtime directory ---------------------
echo.
echo *** Creating Runtime Directory %rootdir% ***
if not exist %rootdir% mkdir %rootdir%
@rem for unknown reasons the mkdir works but returns an error code
if not exist %rootdir% (
set err_msg=Could not create the directory %rootdir%. Please check the directory's permissions and try again.
goto :err_exit
)
echo Successful.

@rem --------------------------- Create and populate .venv ---------------------------
echo.
echo ** Creating Virtual Environment for InvokeAI **
call python -mvenv %rootdir%\.venv
if %errorlevel% neq 0 (
set err_msg=Could not create virtual environment %rootdir%\.venv. Please check the directory's permissions and try again.
goto :err_exit
)
echo Successful.

echo.
echo *** Installing InvokeAI Requirements ***
call %rootdir%\.venv\Scripts\activate.bat
copy environments-and-requirements\requirements-win-colab-cuda.txt .\requirements.txt
call python -mpip install -r requirements.txt
if %errorlevel% neq 0 (
set err_msg=Installation of requirements failed. See above for errors and check %TROUBLESHOOTING% for potential solutions.
goto :err_exit
)
echo Installation successful.

echo.
echo *** Installing InvokeAI Modules and Executables ***
call python -mpip install %INVOKE_AI_SRC%
if %errorlevel% neq 0 (
set err_msg=Installation of InvokeAI failed. See above for errors and check %TROUBLESHOOTING% for potential solutions.
goto :err_exit
)
echo Installation successful.

@rem --------------------------- Set up the root directory ---------------------------
xcopy /E /Y .\templates\rootdir %rootdir%
PUSHD "%rootdir%"
call .venv\Scripts\python .venv\Scripts\configure_invokeai.py --root="%rootdir%"
if %errorlevel% neq 0 (
set err_msg=Configuration failed. See above for error messages and check %TROUBLESHOOTING% for potential solutions.
|
||||
goto :err_exit
|
||||
)
|
||||
POPD
|
||||
copy .\templates\invoke.bat.in %rootdir%\invoke.bat
|
||||
copy .\templates\update.bat.in %rootdir%\update.bat
|
||||
|
||||
@rem so that update.bat works
|
||||
mkdir %rootdir%\environments-and-requirements
|
||||
xcopy /I /Y .\environments-and-requirements %rootdir%\environments-and-requirements
|
||||
copy .\requirements.txt %rootdir%\requirements.txt
|
||||
|
||||
|
||||
echo.
|
||||
echo ***** Finished configuration *****
|
||||
echo All done. Execute the file %rootdir%\invoke.bat to start InvokeAI.
|
||||
pause
|
||||
deactivate
|
||||
exit
|
||||
|
||||
@rem ------------------------ Subroutines ---------------
|
||||
@rem routine to do comparison of semantic version numbers
|
||||
@rem found at https://stackoverflow.com/questions/15807762/compare-version-numbers-in-batch-file
|
||||
:compareVersions
|
||||
::
|
||||
:: Compares two version numbers and returns the result in the ERRORLEVEL
|
||||
::
|
||||
:: Returns 1 if version1 > version2
|
||||
:: 0 if version1 = version2
|
||||
:: -1 if version1 < version2
|
||||
::
|
||||
:: The nodes must be delimited by . or , or -
|
||||
::
|
||||
:: Nodes are normally strictly numeric, without a 0 prefix. A letter suffix
|
||||
:: is treated as a separate node
|
||||
::
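:: Example: "call :compareVersions 3.9.0 3.10.9" sets ERRORLEVEL to -1 (the
:: detected Python is newer than the minimum), while
:: "call :compareVersions 3.9.0 3.8.10" sets it to 1, which the caller above
:: treats as "Python too old".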
|
||||
setlocal enableDelayedExpansion
|
||||
set "v1=%~1"
|
||||
set "v2=%~2"
|
||||
call :divideLetters v1
|
||||
call :divideLetters v2
|
||||
:loop
|
||||
call :parseNode "%v1%" n1 v1
|
||||
call :parseNode "%v2%" n2 v2
|
||||
if %n1% gtr %n2% exit /b 1
|
||||
if %n1% lss %n2% exit /b -1
|
||||
if not defined v1 if not defined v2 exit /b 0
|
||||
if not defined v1 exit /b -1
|
||||
if not defined v2 exit /b 1
|
||||
goto :loop
|
||||
|
||||
|
||||
:parseNode version nodeVar remainderVar
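:: e.g. parsing "3.10.9" stores 3 in nodeVar and "10.9" in remainderVar,
:: so :loop above consumes one node per iteration.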
|
||||
for /f "tokens=1* delims=.,-" %%A in ("%~1") do (
|
||||
set "%~2=%%A"
|
||||
set "%~3=%%B"
|
||||
)
|
||||
exit /b
|
||||
|
||||
|
||||
:divideLetters versionVar
|
||||
for %%C in (a b c d e f g h i j k l m n o p q r s t u v w x y z) do set "%~1=!%~1:%%C=.%%C!"
|
||||
exit /b
|
||||
|
||||
:err_exit
|
||||
echo %err_msg%
|
||||
echo The installer will exit now.
|
||||
pause
|
||||
exit /b
|
||||
|
217
installer/install.sh.in
Normal file
@ -0,0 +1,217 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# ensure we're in the correct folder in case user's CWD is somewhere else
|
||||
scriptdir=$(dirname "$0")
|
||||
cd "$scriptdir"
|
||||
|
||||
# make sure we are not already in a venv
|
||||
# (don't need to check status)
|
||||
deactivate >/dev/null 2>&1
|
||||
|
||||
# this should be changed to the tagged release!
|
||||
INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/main.zip
|
||||
# INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip
|
||||
INSTRUCTIONS=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
|
||||
TROUBLESHOOTING=https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting
|
||||
MINIMUM_PYTHON_VERSION=3.9.0
|
||||
|
||||
set -euo pipefail
|
||||
IFS=$'\n\t'
|
||||
|
||||
function _err_exit {
|
||||
if test "$1" -ne 0
|
||||
then
|
||||
echo -e "Error code $1; Error caught was '$2'"
|
||||
if [ "$OS_NAME" == "osx" ]; then
|
||||
echo "Something went wrong while installing InvokeAI and/or its requirements."
|
||||
echo "You may need to use the Xcode command line tools to proceed. See step number 3 of"
|
||||
echo "https://invoke-ai.github.io/InvokeAI/INSTALL_SOURCE#walk_through for"
|
||||
echo "installation instructions and then run this script again."
|
||||
else
|
||||
echo "Something went wrong while installing InvokeAI and/or its requirements."
|
||||
echo "See https://invoke-ai.github.io/InvokeAI/INSTALL_SOURCE#troubleshooting for troubleshooting"
|
||||
echo "tips, or visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
|
||||
echo "installation methods"
|
||||
fi
|
||||
read -p "Press any key to exit..."
|
||||
exit
|
||||
fi
|
||||
}
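# usage: _err_exit $? "description of the step that just ran"
# (no-op when the previous command exited with status 0)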
|
||||
|
||||
function readinput() {
|
||||
local CLEAN_ARGS=""
|
||||
while [[ $# -gt 0 ]]; do
|
||||
local i="$1"
|
||||
case "$i" in
|
||||
"-i")
|
||||
if read -i "default" 2>/dev/null <<< "test"; then
|
||||
CLEAN_ARGS="$CLEAN_ARGS -i \"$2\""
|
||||
fi
|
||||
shift
|
||||
shift
|
||||
;;
|
||||
"-p")
|
||||
CLEAN_ARGS="$CLEAN_ARGS -p \"$2\""
|
||||
shift
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
CLEAN_ARGS="$CLEAN_ARGS $1"
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
done
|
||||
eval read $CLEAN_ARGS
|
||||
}
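# readinput wraps the bash builtin "read", silently dropping the "-i <default>"
# option on shells whose read does not support it (e.g. the bash 3.2 that ships
# with macOS), so the same prompts work on both Linux and Mac.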
|
||||
|
||||
|
||||
function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }
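# e.g. "version 3.9.0" prints 3009000000 and "version 3.10.9" prints 3010009000,
# so version strings can be compared as plain integers with -ge below.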
|
||||
|
||||
echo "InvokeAI simple installer..."
|
||||
echo ""
|
||||
echo "Some of the installation steps take a long time to run. Please be patient."
|
||||
echo "If the script appears to hang for more than 10 minutes, please interrupt with control-C and retry."
|
||||
read -n 1 -s -r -p "<Press any key to start the install>"
|
||||
echo ""
|
||||
|
||||
OS_NAME=$(uname -s)
|
||||
case "${OS_NAME}" in
|
||||
Linux*) OS_NAME="linux";;
|
||||
Darwin*) OS_NAME="osx";;
|
||||
*) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
|
||||
esac
|
||||
|
||||
OS_ARCH=$(uname -m)
|
||||
case "${OS_ARCH}" in
|
||||
x86_64*) OS_ARCH="64";;
|
||||
arm64*) OS_ARCH="arm64";;
|
||||
*) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit
|
||||
esac
|
||||
|
||||
echo "Installing for $OS_NAME-$OS_ARCH"
|
||||
# confirm that python is installed and is up to date
|
||||
|
||||
PYTHON=""
|
||||
for candidate in python3.10 python3.9 python3 python python3.11 ; do
|
||||
if ppath=`which $candidate`; then
|
||||
python_version=$($ppath -V | awk '{ print $2 }')
|
||||
if [ $(version $python_version) -ge $(version "$MINIMUM_PYTHON_VERSION") ]; then
|
||||
PYTHON=$ppath
|
||||
echo Python $python_version found at $PYTHON
|
||||
break
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
if [ -z "$PYTHON" ]; then
|
||||
echo "A suitable Python interpreter could not be found"
|
||||
echo "Please install Python 3.9 or higher before running this script. See instructions at $INSTRUCTIONS for help."
|
||||
read -p "Press any key to exit"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
if [ "$OS_NAME" == "osx" ]; then
|
||||
xcode_path=$(xcode-select --print-path)
|
||||
_err_exit $? "xcode_path command not found"
|
||||
export CPPFLAGS="-I$xcode_path/Library/Frameworks/Python3.framework/Versions/Current/Headers"
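# (points the compiler at the Python headers bundled with Xcode; some
# requirements may need to be built from source on macOS and will not compile
# without them)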
|
||||
echo "Will compile wheels with CPPFLAGS=$CPPFLAGS"
|
||||
fi
|
||||
|
||||
ROOTDIR=""
|
||||
while [ "$ROOTDIR" == "" ]
|
||||
do
|
||||
echo
|
||||
readinput -e -p "Select your preferred location for the 'invokeai' directory [$HOME]: " -i $HOME input
|
||||
ROOTDIR=${input:=$HOME}/invokeai
|
||||
read -e -p "InvokeAI will be installed into $ROOTDIR. OK? [y]: " input
|
||||
RESPONSE=${input:='y'}
|
||||
if [ "$RESPONSE" == 'y' ]; then
|
||||
if [ -e $ROOTDIR ]; then
|
||||
echo
|
||||
read -e -p "Directory $ROOTDIR already exists. Do you want to resume an interrupted install? [y]: " input
|
||||
RESPONSE=${input:='y'}
|
||||
if [ "$RESPONSE" != 'y' ]; then
|
||||
ROOTDIR=""
|
||||
fi
|
||||
else
|
||||
mkdir -p $ROOTDIR
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Could not create $ROOTDIR. Try again with a different install location."
|
||||
ROOTDIR=""
|
||||
fi
|
||||
fi
|
||||
else
|
||||
ROOTDIR=""
|
||||
fi
|
||||
done
|
||||
|
||||
#--------------------------------------------------------------------------------
|
||||
echo
|
||||
echo "** Creating Virtual Environment for InvokeAI **"
|
||||
|
||||
$PYTHON -mpip install --upgrade pip
|
||||
$PYTHON -mvenv $ROOTDIR/.venv
|
||||
_err_exit $? "Python failed to create virtual environment $ROOTDIR/.venv. Please see $TROUBLESHOOTING for help."
|
||||
|
||||
#--------------------------------------------------------------------------------
|
||||
echo
|
||||
echo "** Activating Virtual Environment for InvokeAI **"
|
||||
|
||||
source $ROOTDIR/.venv/bin/activate
|
||||
_err_exit $? "Failed to activate virtual evironment $ROOTDIR/.venv. Please see $TROUBLESHOOTING for help."
|
||||
|
||||
PYTHON=$ROOTDIR/.venv/bin/python
|
||||
|
||||
#--------------------------------------------------------------------------------
|
||||
echo
|
||||
echo "*** Installing InvokeAI Dependencies ***"
|
||||
|
||||
if [ "$OS_NAME" == "osx" ]; then
|
||||
echo "macOS detected. Installing MPS and CPU support."
|
||||
egrep -v '^-e .' environments-and-requirements/requirements-mac-mps-cpu.txt >requirements.txt
|
||||
else
|
||||
if (lsmod | grep amdgpu) &>/dev/null ; then
|
||||
echo "Linux system with AMD GPU driver detected. Installing ROCm and CPU support"
|
||||
egrep -v '^-e .' environments-and-requirements/requirements-lin-amd.txt >requirements.txt
|
||||
else
|
||||
echo "Linux system detected. Installing CUDA and CPU support."
|
||||
egrep -v '^-e .' environments-and-requirements/requirements-lin-cuda.txt >requirements.txt
|
||||
fi
|
||||
fi
|
||||
|
||||
$PYTHON -mpip install -r requirements.txt
|
||||
_err_exit $? "Failed to install InvokeAI's dependencies."
|
||||
|
||||
#--------------------------------------------------------------------------------
|
||||
echo
|
||||
echo "*** Installing InvokeAI Modules and Executables ***"
|
||||
$PYTHON -mpip install $INVOKE_AI_SRC
|
||||
_err_exit $? "Installation of InvokeAI failed."
|
||||
|
||||
#--------------------------------------------------------------------------------
|
||||
echo " *** Setting Up Root Directory $ROOTDIR *** "
|
||||
cp -pr templates/rootdir/* $ROOTDIR/
|
||||
cp templates/invoke.sh.in $ROOTDIR/invoke.sh
|
||||
chmod a+rx $ROOTDIR/invoke.sh
|
||||
cp templates/update.sh.in $ROOTDIR/update.sh
|
||||
chmod a+rx $ROOTDIR/update.sh
|
||||
|
||||
# This allows the updater to work!
|
||||
cp -pr environments-and-requirements requirements.txt $ROOTDIR/
|
||||
|
||||
#--------------------------------------------------------------------------------
|
||||
echo
|
||||
echo "*** Confguring InvokeAI ***"
|
||||
pushd $ROOTDIR
|
||||
./.venv/bin/configure_invokeai.py --root=$ROOTDIR
|
||||
_err_exit $? "Initial configuration failed. Please see above error messages and $TROUBLESHOOTING for help."
|
||||
|
||||
#--------------------------------------------------------------------------------
|
||||
popd
|
||||
cp templates/invoke.sh.in $ROOTDIR/invoke.sh
|
||||
chmod a+rx $ROOTDIR/invoke.sh
|
||||
|
||||
cp templates/update.sh.in $ROOTDIR/update.sh
|
||||
chmod a+rx $ROOTDIR/update.sh
|
||||
|
||||
echo "You may now run InvokeAI by entering the directory $ROOTDIR and running invoke.sh"
|
52
installer/readme.txt
Normal file
@ -0,0 +1,52 @@
|
||||
InvokeAI
|
||||
|
||||
Project homepage: https://github.com/invoke-ai/InvokeAI
|
||||
|
||||
Preparations:
|
||||
|
||||
You will need to install Python 3.9 or higher for this installer
|
||||
to work. Instructions are given here:
|
||||
https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
|
||||
|
||||
Before you start the installer, please open up your system's command
|
||||
line window (Terminal or Command) and type the commands:
|
||||
|
||||
python --version
|
||||
|
||||
If all is well, it will print "Python 3.X.X", where the version number
|
||||
is at least 3.9
|
||||
|
||||
If this works, check the version of the Python package manager, pip:
|
||||
|
||||
pip --version
|
||||
|
||||
You should get a message that indicates that the pip package
|
||||
installer was derived from Python 3.9 or 3.10. For example:
|
||||
"pip 22.3.1 from /usr/bin/pip (python 3.9)"
|
||||
|
||||
Long Paths on Windows:
|
||||
|
||||
If you are on Windows, you will need to enable Windows Long Paths to
|
||||
run InvokeAI successfully. If you're not sure what this is, you
|
||||
almost certainly need to do this.
|
||||
|
||||
Simply double-click the "WinLongPathsEnabled.reg" file located in
|
||||
this directory, and approve the Windows warnings. Note that you will
|
||||
need to have admin privileges in order to do this.
|
||||
|
||||
Launching the installer:
|
||||
|
||||
Windows: double-click the 'install.bat' file (while keeping it inside
|
||||
the InvokeAI-Installer folder).
|
||||
|
||||
Linux and Mac: Please open the terminal application and run
|
||||
'./install.sh' (while keeping it inside the InvokeAI-Installer
|
||||
folder).
|
||||
|
||||
The installer will create a directory named "invokeai" in the folder
|
||||
of your choice. This directory contains everything you need to run
|
||||
invokeai. Once InvokeAI is up and running, you may delete the
|
||||
InvokeAI-Installer folder at your convenience.
|
||||
|
||||
For more information, please see
|
||||
https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/
|
37
installer/templates/invoke.bat.in
Normal file
@ -0,0 +1,37 @@
|
||||
@echo off
|
||||
|
||||
PUSHD "%~dp0"
|
||||
setlocal
|
||||
|
||||
call .venv\Scripts\activate.bat
|
||||
set INVOKEAI_ROOT=.
|
||||
|
||||
echo Do you want to generate images using the
|
||||
echo 1. command-line
|
||||
echo 2. browser-based UI
|
||||
echo 3. open the developer console
|
||||
set /P restore="Please enter 1, 2 or 3: "
|
||||
IF /I "%restore%" == "1" (
|
||||
echo Starting the InvokeAI command-line..
|
||||
python .venv\Scripts\invoke.py %*
|
||||
) ELSE IF /I "%restore%" == "2" (
|
||||
echo Starting the InvokeAI browser-based UI..
|
||||
python .venv\Scripts\invoke.py --web %*
|
||||
) ELSE IF /I "%restore%" == "3" (
|
||||
echo Developer Console
|
||||
echo Python command is:
|
||||
where python
|
||||
echo Python version is:
|
||||
python --version
|
||||
echo *************************
|
||||
echo You are now in the system shell, with the local InvokeAI Python virtual environment activated,
|
||||
echo so that you can troubleshoot this InvokeAI installation as necessary.
|
||||
echo *************************
|
||||
echo *** Type `exit` to quit this shell and deactivate the Python virtual environment ***
|
||||
call cmd /k
|
||||
) ELSE (
|
||||
echo Invalid selection
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
endlocal
|
22
source_installer/invoke.sh.in → installer/templates/invoke.sh.in
Executable file → Normal file
@ -1,19 +1,19 @@
|
||||
#!/bin/bash
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
set -eu
|
||||
|
||||
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
|
||||
if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi
|
||||
# ensure we're in the correct folder in case user's CWD is somewhere else
|
||||
scriptdir=$(dirname "$0")
|
||||
cd "$scriptdir"
|
||||
|
||||
CONDA_BASEPATH=$(conda info --base)
|
||||
source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
|
||||
. .venv/bin/activate
|
||||
|
||||
conda activate invokeai
|
||||
export INVOKEAI_ROOT="$scriptdir"
|
||||
|
||||
# set required env var for torch on mac MPS
|
||||
if [ "$(uname -s)" == "Darwin" ]; then
|
||||
export PYTORCH_ENABLE_MPS_FALLBACK=1
|
||||
fi
|
||||
if [ "$(uname -s)" == "Darwin" ]; then
|
||||
export PYTORCH_ENABLE_MPS_FALLBACK=1
|
||||
fi
|
||||
|
||||
if [ "$0" != "bash" ]; then
|
||||
echo "Do you want to generate images using the"
|
||||
@ -22,8 +22,8 @@ if [ "$0" != "bash" ]; then
|
||||
echo "3. open the developer console"
|
||||
read -p "Please enter 1, 2, or 3: " yn
|
||||
case $yn in
|
||||
1 ) printf "\nStarting the InvokeAI command-line..\n"; python scripts/invoke.py $*;;
|
||||
2 ) printf "\nStarting the InvokeAI browser-based UI..\n"; python scripts/invoke.py --web $*;;
|
||||
1 ) printf "\nStarting the InvokeAI command-line..\n"; .venv/bin/python .venv/bin/invoke.py $*;;
|
||||
2 ) printf "\nStarting the InvokeAI browser-based UI..\n"; .venv/bin/python .venv/bin/invoke.py --web $*;;
|
||||
3 ) printf "\nDeveloper Console:\n"; file_name=$(basename "${BASH_SOURCE[0]}"); bash --init-file "$file_name";;
|
||||
* ) echo "Invalid selection"; exit;;
|
||||
esac
|
52
installer/templates/update.bat.in
Normal file
@ -0,0 +1,52 @@
|
||||
@echo off
|
||||
setlocal EnableExtensions EnableDelayedExpansion
|
||||
|
||||
PUSHD "%~dp0"
|
||||
|
||||
set INVOKE_AI_SRC=https://github.com/invoke-ai/InvokeAI/archive/main.zip
|
||||
set arg=%1
|
||||
if "%arg%" neq "" (
|
||||
if "%arg:~0,4%" neq "http" (
|
||||
echo Usage: update.bat ^<release URL^>.zip
|
||||
echo Updates InvokeAI to use the indicated version of the code base.
|
||||
echo Find the zip file for the release you want, and pass it as the argument.
|
||||
echo For example update.bat https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip
|
||||
echo.
|
||||
echo If no argument is provided, the script will install the most recent development version, equivalent to
|
||||
echo update.bat https://github.com/invoke-ai/InvokeAI/archive/main.zip
|
||||
exit /b
|
||||
) else (
|
||||
set INVOKE_AI_SRC=%arg%
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
call .venv\Scripts\activate.bat
|
||||
|
||||
echo This script will update InvokeAI and all its dependencies to !INVOKE_AI_SRC!.
|
||||
echo If you do not want to do this, press control-C now!
|
||||
pause
|
||||
|
||||
call pip install -r requirements.txt
|
||||
if %errorlevel% neq 0 (
|
||||
echo Installation of requirements failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
|
||||
exit /b
|
||||
)
|
||||
|
||||
call pip install !INVOKE_AI_SRC!
|
||||
if %errorlevel% neq 0 (
|
||||
echo Installation of InvokeAI failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
|
||||
exit /b
|
||||
)
|
||||
|
||||
call .venv\Scripts\python .venv\Scripts\configure_invokeai.py --root=.
|
||||
|
||||
if %errorlevel% neq 0 (
|
||||
echo Configuration of InvokeAI failed. See https://invoke-ai.github.io/InvokeAI/installation/INSTALL_AUTOMATED/#troubleshooting for suggestions.
|
||||
exit /b
|
||||
)
|
||||
|
||||
echo "Press any key to continue"
|
||||
pause
|
||||
endlocal
|
||||
|
52
installer/templates/update.sh.in
Normal file
@ -0,0 +1,52 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -eu
|
||||
|
||||
if [ $# -ge 1 ] && [ "${1:0:4}" != "http" ]; then
|
||||
echo "Usage: update.sh <release URL>.zip"
|
||||
echo "Updates InvokeAI to use the indicated version of the code base."
|
||||
echo "Find the zip file for the release you want, and pass it as the argument."
|
||||
echo "For example update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.3.zip"
|
||||
echo ""
|
||||
echo "If no argument provided then will install the most recent development version, equivalent to"
|
||||
echo "update.sh https://github.com/invoke-ai/InvokeAI/archive/main.zip"
|
||||
exit -1
|
||||
fi
|
||||
|
||||
INVOKE_AI_SRC=${1:-https://github.com/invoke-ai/InvokeAI/archive/main.zip}
|
||||
|
||||
# ensure we're in the correct folder in case user's CWD is somewhere else
|
||||
scriptdir=$(dirname "$0")
|
||||
cd "$scriptdir"
|
||||
|
||||
function _err_exit {
|
||||
if test "$1" -ne 0
|
||||
then
|
||||
echo "Something went wrong while installing InvokeAI and/or its requirements."
|
||||
echo "Update cannot continue. Please report this error to https://github.com/invoke-ai/InvokeAI/issues"
|
||||
echo -e "Error code $1; Error caught was '$2'"
|
||||
read -p "Press any key to exit..."
|
||||
exit
|
||||
fi
|
||||
}
|
||||
|
||||
echo This script will update InvokeAI and all its dependencies from $INVOKE_AI_SRC.
|
||||
echo If you do not want to do this, press control-C now!
|
||||
read -p "Press any key to continue, or CTRL-C to exit..."
|
||||
|
||||
. .venv/bin/activate
|
||||
|
||||
pip install -r requirements.txt
|
||||
_err_exit $? "The pip program failed to install InvokeAI's requirements."
|
||||
|
||||
pip install $INVOKE_AI_SRC
|
||||
_err_exit $? "The pip program failed to install InvokeAI."
|
||||
|
||||
python .venv/bin/configure_invokeai.py
|
||||
_err_exit $? "The configure script failed to run successfully."
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
@ -132,7 +132,6 @@ gr = Generate(
|
||||
|
||||
"""
|
||||
|
||||
|
||||
class Generate:
|
||||
"""Generate class
|
||||
Stores default values for multiple configuration items
|
||||
@ -458,6 +457,11 @@ class Generate:
|
||||
init_image = None
|
||||
mask_image = None
|
||||
|
||||
|
||||
if self.free_gpu_mem and self.model.cond_stage_model.device != self.model.device:
|
||||
self.model.cond_stage_model.device = self.model.device
|
||||
self.model.cond_stage_model.to(self.model.device)
|
||||
|
||||
try:
|
||||
uc, c, extra_conditioning_info = get_uc_and_c_and_ec(
|
||||
prompt, model =self.model,
|
||||
|
@ -46,7 +46,6 @@ def main():
|
||||
args.max_loaded_models = 1
|
||||
|
||||
# alert - setting globals here
|
||||
Globals.root = os.path.expanduser(args.root_dir or os.environ.get('INVOKEAI_ROOT') or os.path.abspath('.'))
|
||||
Globals.try_patchmatch = args.patchmatch
|
||||
Globals.always_use_cpu = args.always_use_cpu
|
||||
|
||||
|
@ -0,0 +1 @@
|
||||
__version__='2.2.4'
|
@ -121,7 +121,7 @@ PRECISION_CHOICES = [
|
||||
|
||||
# is there a way to pick this up during git commits?
|
||||
APP_ID = 'invoke-ai/InvokeAI'
|
||||
APP_VERSION = 'v2.2.3'
|
||||
APP_VERSION = 'v2.2.4'
|
||||
|
||||
class ArgFormatter(argparse.RawTextHelpFormatter):
|
||||
# use defined argument order to display usage
|
||||
@ -174,14 +174,20 @@ class Args(object):
|
||||
'''Parse the shell switches and store.'''
|
||||
try:
|
||||
sysargs = sys.argv[1:]
|
||||
initfile = os.path.expanduser(Globals.initfile)
|
||||
# pre-parse to get the root directory; ignore the rest
|
||||
switches = self._arg_parser.parse_args(sysargs)
|
||||
Globals.root = os.path.abspath(switches.root_dir or Globals.root)
|
||||
|
||||
# now use root directory to find the init file
|
||||
initfile = os.path.expanduser(os.path.join(Globals.root,Globals.initfile))
|
||||
legacyinit = os.path.expanduser('~/.invokeai')
|
||||
if os.path.exists(initfile):
|
||||
print(f'>> Initialization file {initfile} found. Loading...')
|
||||
sysargs.insert(0,f'@{initfile}')
|
||||
else:
|
||||
from ldm.invoke.CLI import emergency_model_reconfigure
|
||||
emergency_model_reconfigure()
|
||||
sys.exit(-1)
|
||||
elif os.path.exists(legacyinit):
|
||||
print(f'>> WARNING: Old initialization file found at {legacyinit}. This location is deprecated. Please move it to {Globals.root}/invokeai.init.')
|
||||
sysargs.insert(0,f'@{legacyinit}')
|
||||
|
||||
self._arg_switches = self._arg_parser.parse_args(sysargs)
|
||||
return self._arg_switches
|
||||
except Exception as e:
|
||||
@ -413,7 +419,7 @@ class Args(object):
|
||||
model_group.add_argument(
|
||||
'--root_dir',
|
||||
default=None,
|
||||
help='Path to directory containing "models", "outputs" and "configs". If not present will try to read from ~/.invokeai and then from environment variable INVOKEAI_ROOT. Defaults to the current directory as a last resort.',
|
||||
help='Path to directory containing "models", "outputs" and "configs". If not present will read from environment variable INVOKEAI_ROOT. Defaults to ~/invokeai.',
|
||||
)
|
||||
model_group.add_argument(
|
||||
'--config',
|
||||
|
@ -17,10 +17,10 @@ from argparse import Namespace
|
||||
Globals = Namespace()
|
||||
|
||||
# This is usually overwritten by the command line and/or environment variables
|
||||
Globals.root = '.'
|
||||
Globals.root = os.path.abspath(os.environ.get('INVOKEAI_ROOT') or os.path.expanduser('~/invokeai'))
|
||||
|
||||
# Where to look for the initialization file
|
||||
Globals.initfile = os.path.expanduser('~/.invokeai')
|
||||
Globals.initfile = 'invokeai.init'
|
||||
|
||||
# Awkward workaround to disable attempted loading of pypatchmatch
|
||||
# which is causing CI tests to error out.
|
||||
|
@ -57,8 +57,13 @@ def retrieve_metadata(img_path):
|
||||
metadata stored there, as a dict
|
||||
'''
|
||||
im = Image.open(img_path)
|
||||
md = im.text.get('sd-metadata', '{}')
|
||||
dream_prompt = im.text.get('Dream', '')
|
||||
if hasattr(im, 'text'):
|
||||
md = im.text.get('sd-metadata', '{}')
|
||||
dream_prompt = im.text.get('Dream', '')
|
||||
else:
|
||||
# Images without a 'text' payload (such as JPG files) carry no embedded metadata.
|
||||
md = '{}'
|
||||
dream_prompt = ''
|
||||
return {'sd-metadata': json.loads(md), 'Dream': dream_prompt}
|
||||
|
||||
def write_metadata(img_path:str, meta:dict):
|
||||
|
@ -208,9 +208,12 @@ class KSampler(Sampler):
|
||||
model_wrap_cfg = CFGDenoiser(self.model, threshold=threshold, warmup=max(0.8*S,S-10))
|
||||
model_wrap_cfg.prepare_to_sample(S, extra_conditioning_info=extra_conditioning_info)
|
||||
|
||||
attention_map_token_ids = range(1, extra_conditioning_info.tokens_count_including_eos_bos - 1)
|
||||
attention_maps_saver = None if attention_maps_callback is None else AttentionMapSaver(token_ids = attention_map_token_ids, latents_shape=x.shape[-2:])
|
||||
if attention_maps_callback is not None:
|
||||
# setup attention maps saving. checks for None are because there are multiple code paths to get here.
|
||||
attention_maps_saver = None
|
||||
if attention_maps_callback is not None and extra_conditioning_info is not None:
|
||||
eos_token_index = extra_conditioning_info.tokens_count_including_eos_bos - 1
|
||||
attention_map_token_ids = range(1, eos_token_index)
|
||||
attention_maps_saver = AttentionMapSaver(token_ids = attention_map_token_ids, latents_shape=x.shape[-2:])
|
||||
model_wrap_cfg.invokeai_diffuser.setup_attention_map_saving(attention_maps_saver)
|
||||
|
||||
extra_args = {
|
||||
@ -226,7 +229,7 @@ class KSampler(Sampler):
|
||||
),
|
||||
None,
|
||||
)
|
||||
if attention_maps_callback is not None:
|
||||
if attention_maps_saver is not None:
|
||||
attention_maps_callback(attention_maps_saver)
|
||||
return sampling_result
|
||||
|
||||
|
60
scripts/configure_invokeai.py
Executable file → Normal file
@ -47,7 +47,7 @@ Dataset_path = './configs/INITIAL_MODELS.yaml'
|
||||
Default_config_file = './configs/models.yaml'
|
||||
SD_Configs = './configs/stable-diffusion'
|
||||
|
||||
assert os.path.exists(Dataset_path),"The configs directory cannot be found. Please run this script from within the InvokeAI distribution directory, or from within the invokeai runtime directory."
|
||||
assert os.path.exists(Dataset_path),"The configs directory cannot be found. Please run this script from within the invokeai runtime directory."
|
||||
|
||||
Datasets = OmegaConf.load(Dataset_path)
|
||||
completer = generic_completer(['yes','no'])
|
||||
@ -241,7 +241,7 @@ This involves a few easy steps.
|
||||
"Role" should be "read").
|
||||
|
||||
Now copy the token to your clipboard and paste it at the prompt. Windows
|
||||
users can paste with right-click.
|
||||
users can paste with right-click or Ctrl-Shift-V.
|
||||
Token: '''
|
||||
)
|
||||
access_token = getpass_asterisk.getpass_asterisk()
|
||||
@ -652,22 +652,7 @@ def get_root(root:str=None)->str:
|
||||
elif os.environ.get('INVOKEAI_ROOT'):
|
||||
return os.environ.get('INVOKEAI_ROOT')
|
||||
else:
|
||||
init_file = os.path.expanduser(Globals.initfile)
|
||||
if not os.path.exists(init_file):
|
||||
return None
|
||||
|
||||
# if we get here, then we read from initfile
|
||||
root = None
|
||||
with open(init_file, 'r') as infile:
|
||||
lines = infile.readlines()
|
||||
for l in lines:
|
||||
if re.search('\s*#',l): # ignore comments
|
||||
continue
|
||||
match = re.search('--root\s*=?\s*"?([^"]+)"?',l)
|
||||
if match:
|
||||
root = match.groups()[0]
|
||||
root = root.strip()
|
||||
return root
|
||||
return Globals.root
|
||||
|
||||
#-------------------------------------
|
||||
def select_root(root:str, yes_to_all:bool=False):
|
||||
@ -698,49 +683,27 @@ def initialize_rootdir(root:str,yes_to_all:bool=False):
|
||||
print(f'** INITIALIZING INVOKEAI RUNTIME DIRECTORY **')
|
||||
root_selected = False
|
||||
while not root_selected:
|
||||
root = select_root(root,yes_to_all)
|
||||
outputs = select_outputs(root,yes_to_all)
|
||||
Globals.root = os.path.abspath(root)
|
||||
outputs = outputs if os.path.isabs(outputs) else os.path.abspath(os.path.join(Globals.root,outputs))
|
||||
|
||||
print(f'\nInvokeAI models and configuration files will be placed into "{root}" and image outputs will be placed into "{outputs}".')
|
||||
print(f'\nInvokeAI image outputs will be placed into "{outputs}".')
|
||||
if not yes_to_all:
|
||||
root_selected = yes_or_no('Accept these locations?')
|
||||
root_selected = yes_or_no('Accept this location?')
|
||||
else:
|
||||
root_selected = True
|
||||
|
||||
print(f'\nYou may change the chosen directories at any time by editing the --root and --outdir options in "{Globals.initfile}",')
|
||||
print(f'\nYou may change the chosen output directory at any time by editing the --outdir option in "{Globals.initfile}",')
|
||||
print(f'You may also change the runtime directory by setting the environment variable INVOKEAI_ROOT.\n')
|
||||
|
||||
enable_safety_checker = True
|
||||
default_sampler = 'k_heun'
|
||||
default_steps = '20' # deliberately a string - see test below
|
||||
|
||||
sampler_choices =['ddim','k_dpm_2_a','k_dpm_2','k_euler_a','k_euler','k_heun','k_lms','plms']
|
||||
|
||||
if not yes_to_all:
|
||||
print('The NSFW (not safe for work) checker blurs out images that potentially contain sexual imagery.')
|
||||
print('It can be selectively enabled at run time with --nsfw_checker, and disabled with --no-nsfw_checker.')
|
||||
print('The following option will set whether the checker is enabled by default. Like other options, you can')
|
||||
print(f'change this setting later by editing the file {Globals.initfile}.')
|
||||
print(f"This is NOT recommended for systems with less than 6G VRAM because of the checker's memory requirements.")
|
||||
enable_safety_checker = yes_or_no('Enable the NSFW checker by default?',enable_safety_checker)
|
||||
|
||||
print('\nThe next choice selects the sampler to use by default. Samplers have different speed/performance')
|
||||
print('tradeoffs. If you are not sure what to select, accept the default.')
|
||||
sampler = None
|
||||
while sampler not in sampler_choices:
|
||||
sampler = input(f'Default sampler to use? ({", ".join(sampler_choices)}) [{default_sampler}]:') or default_sampler
|
||||
|
||||
print('\nThe number of denoising steps affects both the speed and quality of the images generated.')
|
||||
print('Higher steps often (but not always) increases the quality of the image, but increases image')
|
||||
print('generation time. This can be changed at run time. Accept the default if you are unsure.')
|
||||
steps = ''
|
||||
while not steps.isnumeric():
|
||||
steps = input(f'Default number of steps to use during generation? [{default_steps}]:') or default_steps
|
||||
else:
|
||||
sampler = default_sampler
|
||||
steps = default_steps
|
||||
|
||||
safety_checker = '--nsfw_checker' if enable_safety_checker else '--no-nsfw_checker'
|
||||
|
||||
for name in ('models','configs','embeddings'):
|
||||
@ -751,7 +714,7 @@ def initialize_rootdir(root:str,yes_to_all:bool=False):
|
||||
shutil.copytree(src,dest,dirs_exist_ok=True)
|
||||
os.makedirs(outputs, exist_ok=True)
|
||||
|
||||
init_file = os.path.expanduser(Globals.initfile)
|
||||
init_file = os.path.join(Globals.root,Globals.initfile)
|
||||
|
||||
print(f'Creating the initialization file at "{init_file}".\n')
|
||||
with open(init_file,'w') as f:
|
||||
@ -760,16 +723,11 @@ def initialize_rootdir(root:str,yes_to_all:bool=False):
|
||||
# Feel free to edit. If anything goes wrong, you can re-initialize this file by deleting
|
||||
# or renaming it and then running configure_invokeai.py again.
|
||||
|
||||
# The --root option below points to the folder in which InvokeAI stores its models, configs and outputs.
|
||||
--root="{Globals.root}"
|
||||
|
||||
# the --outdir option controls the default location of image files.
|
||||
--outdir="{outputs}"
|
||||
|
||||
# generation arguments
|
||||
{safety_checker}
|
||||
--sampler={sampler}
|
||||
--steps={steps}
|
||||
|
||||
# You may place other frequently-used startup commands here, one or more per line.
|
||||
# Examples:
|
||||
@ -835,7 +793,7 @@ def main():
|
||||
|
||||
# We check to see if the runtime directory is correctly initialized.
|
||||
if Globals.root == '' \
|
||||
or not os.path.exists(os.path.join(Globals.root,'configs/stable-diffusion/v1-inference.yaml')):
|
||||
or not os.path.exists(os.path.join(Globals.root,'invokeai.init')):
|
||||
initialize_rootdir(Globals.root,opt.yes_to_all)
|
||||
|
||||
# Optimistically try to download all required assets. If any errors occur, add them and proceed anyway.
|
||||
|
2
setup.py
@ -6,7 +6,7 @@ from setuptools import setup, find_packages
|
||||
def list_files(directory):
|
||||
return [os.path.join(directory,x) for x in os.listdir(directory) if os.path.isfile(os.path.join(directory,x))]
|
||||
|
||||
VERSION = '2.2.0'
|
||||
VERSION = '2.2.4'
|
||||
DESCRIPTION = ('An implementation of Stable Diffusion which provides various new features'
|
||||
' and options to aid the image generation process')
|
||||
LONG_DESCRIPTION = ('This version of Stable Diffusion features a slick WebGUI, an'
|
||||
|
@ -1,27 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
|
||||
VERSION='2.2.3'
|
||||
|
||||
# make the installer zip for linux and mac
|
||||
rm -rf invokeAI
|
||||
mkdir -p invokeAI
|
||||
cp install.sh.in invokeAI/install.sh
|
||||
chmod a+x invokeAI/install.sh
|
||||
cp readme.txt invokeAI
|
||||
|
||||
zip -r invokeAI-src-installer-$VERSION-linux.zip invokeAI
|
||||
zip -r invokeAI-src-installer-$VERSION-mac.zip invokeAI
|
||||
|
||||
# make the installer zip for windows
|
||||
rm -rf invokeAI
|
||||
mkdir -p invokeAI
|
||||
cp install.bat.in invokeAI/install.bat
|
||||
cp readme.txt invokeAI
|
||||
cp WinLongPathsEnabled.reg invokeAI
|
||||
|
||||
zip -r invokeAI-src-installer-$VERSION-windows.zip invokeAI
|
||||
|
||||
rm -rf invokeAI
|
||||
echo "The installer zips are ready to be distributed.."
|
@ -1,127 +0,0 @@
|
||||
@echo off
|
||||
|
||||
@rem This script will install git and conda (if not found on the PATH variable)
|
||||
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
|
||||
@rem For users who already have git and conda, this step will be skipped.
|
||||
|
||||
@rem Next, it'll checkout the project's git repo, if necessary.
|
||||
@rem Finally, it'll create the conda environment and configure InvokeAI.
|
||||
|
||||
@rem This enables a user to install this project without manually installing conda and git.
|
||||
|
||||
@rem change to the script's directory
|
||||
PUSHD "%~dp0"
|
||||
|
||||
echo "InvokeAI source installer..."
|
||||
echo ""
|
||||
echo "Some of the installation steps take a long time to run. Please be patient."
|
||||
echo "If the script appears to hang for more than 10 minutes, please interrupt with control-C and retry."
|
||||
echo "<Press any key to start the install process>"
|
||||
pause
|
||||
echo ""
|
||||
|
||||
@rem config
|
||||
set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba
|
||||
set INSTALL_ENV_DIR=%cd%\installer_files\env
|
||||
set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
|
||||
set REPO_URL=https://github.com/invoke-ai/InvokeAI.git
|
||||
set umamba_exists=F
|
||||
@rem Change the download URL to an InvokeAI repo's release URL
|
||||
|
||||
@rem figure out whether git and conda needs to be installed
|
||||
if exist "%INSTALL_ENV_DIR%" set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%
|
||||
|
||||
set PACKAGES_TO_INSTALL=
|
||||
|
||||
call conda --version >.tmp1 2>.tmp2
|
||||
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% conda
|
||||
|
||||
call git --version >.tmp1 2>.tmp2
|
||||
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git
|
||||
|
||||
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >.tmp1 2>.tmp2
|
||||
if "%ERRORLEVEL%" EQU "0" set umamba_exists=T
|
||||
|
||||
@rem (if necessary) install git and conda into a contained environment
|
||||
if "%PACKAGES_TO_INSTALL%" NEQ "" (
|
||||
@rem download micromamba
|
||||
if "%umamba_exists%" == "F" (
|
||||
echo "Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to %MAMBA_ROOT_PREFIX%\micromamba.exe"
|
||||
|
||||
mkdir "%MAMBA_ROOT_PREFIX%"
|
||||
call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe"
|
||||
|
||||
@rem test the mamba binary
|
||||
echo Micromamba version:
|
||||
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version
|
||||
)
|
||||
|
||||
@rem create the installer env
|
||||
if not exist "%INSTALL_ENV_DIR%" (
|
||||
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" create -y --prefix "%INSTALL_ENV_DIR%"
|
||||
)
|
||||
|
||||
echo "Packages to install:%PACKAGES_TO_INSTALL%"
|
||||
|
||||
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge %PACKAGES_TO_INSTALL%
|
||||
|
||||
if not exist "%INSTALL_ENV_DIR%" (
|
||||
echo "There was a problem while installing%PACKAGES_TO_INSTALL% using micromamba. Cannot continue."
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
)
|
||||
|
||||
set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%
|
||||
|
||||
@rem get the repo (and load into the current directory)
|
||||
if not exist ".git" (
|
||||
call git init
|
||||
call git config --local init.defaultBranch main
|
||||
call git remote add origin %REPO_URL%
|
||||
call git fetch
|
||||
call git checkout origin/main -ft
|
||||
)
|
||||
|
||||
@rem activate the base env
|
||||
call conda activate
|
||||
|
||||
@rem create the environment
|
||||
call conda env remove -n invokeai
|
||||
copy environments-and-requirements\environment-win-cuda.yml environment.yml
|
||||
call conda env create
|
||||
if "%ERRORLEVEL%" NEQ "0" (
|
||||
echo ""
|
||||
echo "Something went wrong while installing Python libraries and cannot continue."
|
||||
echo "See https://invoke-ai.github.io/InvokeAI/INSTALL_SOURCE#troubleshooting for troubleshooting"
|
||||
echo "tips, or visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
|
||||
echo "installation methods"
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
|
||||
copy source_installer\invoke.bat.in .\invoke.bat
|
||||
copy source_installer\update.bat.in .\update.bat
|
||||
|
||||
call conda activate invokeai
|
||||
@rem call configure script
|
||||
call python scripts\configure_invokeai.py
|
||||
if "%ERRORLEVEL%" NEQ "0" (
|
||||
echo ""
|
||||
echo "The configure script crashed or was cancelled."
|
||||
echo "InvokeAI is not ready to run. To run preload_models.py again,"
|
||||
echo "run the command 'update.bat' in this directory."
|
||||
echo "Press any key to continue"
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
|
||||
@rem tell the user their next steps
|
||||
echo ""
|
||||
echo "* InvokeAI installed successfully *"
|
||||
echo "You can now start generating images by double-clicking the 'invoke.bat' file (inside this folder)
|
||||
echo "Press any key to continue"
|
||||
pause
|
||||
exit /b
|
||||
|
||||
|
@ -1,152 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# This script will install git and conda (if not found on the PATH variable)
|
||||
# using micromamba (an 8mb static-linked single-file binary, conda replacement).
|
||||
# For users who already have git and conda, this step will be skipped.
|
||||
|
||||
# Next, it'll checkout the project's git repo, if necessary.
|
||||
# Finally, it'll create the conda environment and configure InvokeAI.
|
||||
|
||||
# This enables a user to install this project without manually installing conda and git.
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
|
||||
echo "InvokeAI source installer..."
|
||||
echo ""
|
||||
echo "Some of the installation steps take a long time to run. Please be patient."
|
||||
echo "If the script appears to hang for more than 10 minutes, please interrupt with control-C and retry."
|
||||
read -n 1 -s -r -p "<Press any key to start the install>"
|
||||
echo ""
|
||||
|
||||
OS_NAME=$(uname -s)
|
||||
case "${OS_NAME}" in
|
||||
Linux*) OS_NAME="linux";;
|
||||
Darwin*) OS_NAME="osx";;
|
||||
*) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
|
||||
esac
|
||||
|
||||
OS_ARCH=$(uname -m)
|
||||
case "${OS_ARCH}" in
|
||||
x86_64*) OS_ARCH="64";;
|
||||
arm64*) OS_ARCH="arm64";;
|
||||
*) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit
|
||||
esac
|
||||
|
||||
# https://mamba.readthedocs.io/en/latest/installation.html
|
||||
if [ "$OS_NAME" == "linux" ] && [ "$OS_ARCH" == "arm64" ]; then OS_ARCH="aarch64"; fi
|
||||
|
||||
# config
|
||||
export MAMBA_ROOT_PREFIX="$(pwd)/installer_files/mamba"
|
||||
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
|
||||
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${OS_NAME}-${OS_ARCH}/latest"
|
||||
REPO_URL="https://github.com/invoke-ai/InvokeAI.git"
|
||||
umamba_exists="F"
|
||||
|
||||
# figure out whether git and conda needs to be installed
|
||||
if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi
|
||||
|
||||
PACKAGES_TO_INSTALL=""
|
||||
if ! $(which conda) -V &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL conda"; fi
|
||||
if ! which git &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi
|
||||
|
||||
if "$MAMBA_ROOT_PREFIX/micromamba" --version &>/dev/null; then umamba_exists="T"; fi
|
||||
|
||||
# (if necessary) install git and conda into a contained environment
|
||||
if [ "$PACKAGES_TO_INSTALL" != "" ]; then
|
||||
# download micromamba
|
||||
if [ "$umamba_exists" == "F" ]; then
|
||||
echo "Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to $MAMBA_ROOT_PREFIX/micromamba"
|
||||
|
||||
mkdir -p "$MAMBA_ROOT_PREFIX"
|
||||
curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvjO bin/micromamba > "$MAMBA_ROOT_PREFIX/micromamba"
|
||||
|
||||
chmod u+x "$MAMBA_ROOT_PREFIX/micromamba"
|
||||
|
||||
# test the mamba binary
|
||||
echo "Micromamba version:"
|
||||
"$MAMBA_ROOT_PREFIX/micromamba" --version
|
||||
fi
|
||||
|
||||
# create the installer env
|
||||
if [ ! -e "$INSTALL_ENV_DIR" ]; then
|
||||
"$MAMBA_ROOT_PREFIX/micromamba" create -y --prefix "$INSTALL_ENV_DIR"
|
||||
fi
|
||||
|
||||
echo "Packages to install:$PACKAGES_TO_INSTALL"
|
||||
|
||||
"$MAMBA_ROOT_PREFIX/micromamba" install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge $PACKAGES_TO_INSTALL
|
||||
|
||||
if [ ! -e "$INSTALL_ENV_DIR" ]; then
|
||||
echo "There was a problem while initializing micromamba. Cannot continue."
|
||||
exit
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi
|
||||
|
||||
# get the repo (and load into the current directory)
|
||||
if [ ! -e ".git" ]; then
|
||||
git init
|
||||
git config --local init.defaultBranch main
|
||||
git remote add origin "$REPO_URL"
|
||||
git fetch
|
||||
git checkout origin/main -ft
|
||||
fi
|
||||
|
||||
# create the environment
|
||||
CONDA_BASEPATH=$(conda info --base)
|
||||
source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
|
||||
|
||||
conda activate
|
||||
if [ "$OS_NAME" == "osx" ]; then
|
||||
echo "macOS detected. Installing MPS and CPU support."
|
||||
ln -sf environments-and-requirements/environment-mac.yml environment.yml
|
||||
else
|
||||
if (lsmod | grep amdgpu) &>/dev/null ; then
|
||||
echo "Linux system with AMD GPU driver detected. Installing ROCm and CPU support"
|
||||
ln -sf environments-and-requirements/environment-lin-amd.yml environment.yml
|
||||
else
|
||||
echo "Linux system detected. Installing CUDA and CPU support."
|
||||
ln -sf environments-and-requirements/environment-lin-cuda.yml environment.yml
|
||||
fi
|
||||
fi
|
||||
conda env update
|
||||
|
||||
status=$?
|
||||
|
||||
if test $status -ne 0
|
||||
then
|
||||
if [ "$OS_NAME" == "osx" ]; then
|
||||
echo "Python failed to install the environment. You may need to install"
|
||||
echo "the Xcode command line tools to proceed. See step number 3 of"
|
||||
echo "https://invoke-ai.github.io/InvokeAI/INSTALL_SOURCE#walk_through for"
|
||||
echo "installation instructions and then run this script again."
|
||||
else
|
||||
echo "Something went wrong while installing Python libraries and cannot continue."
|
||||
echo "See https://invoke-ai.github.io/InvokeAI/INSTALL_SOURCE#troubleshooting for troubleshooting"
|
||||
echo "tips, or visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
|
||||
echo "installation methods"
|
||||
fi
|
||||
else
|
||||
cp ./source_installer/invoke.sh.in ./invoke.sh
|
||||
cp ./source_installer/update.sh.in ./update.sh
|
||||
chmod a+rx ./source_installer/invoke.sh.in
|
||||
chmod a+rx ./source_installer/update.sh.in
|
||||
|
||||
conda activate invokeai
|
||||
# configure
|
||||
echo "Calling the configure_invokeai script"
|
||||
python scripts/configure_invokeai.py
|
||||
status=$?
|
||||
if test $status -ne 0
|
||||
then
|
||||
echo "The configure_invoke.py script crashed or was cancelled."
|
||||
echo "InvokeAI is not ready to run. Try again by running"
|
||||
echo "update.sh in this directory."
|
||||
else
|
||||
# tell the user their next steps
|
||||
echo "You can now start generating images by running invoke.sh (inside this folder), using ./invoke.sh"
|
||||
fi
|
||||
fi
|
||||
|
||||
conda activate invokeai
|
@ -1,34 +0,0 @@
|
||||
@echo off
|
||||
|
||||
REM isolate changes to environment variables so that this can be run again with restarting a cmd session
|
||||
setlocal
|
||||
|
||||
PUSHD "%~dp0"
|
||||
set INSTALL_ENV_DIR=%cd%\installer_files\env
|
||||
set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%
|
||||
|
||||
call conda activate invokeai
|
||||
|
||||
echo Do you want to generate images using the
|
||||
echo 1. command-line
|
||||
echo 2. browser-based UI
|
||||
echo 3. open the developer console
|
||||
set /P restore="Please enter 1, 2 or 3: "
|
||||
IF /I "%restore%" == "1" (
|
||||
echo Starting the InvokeAI command-line..
|
||||
python scripts\invoke.py %*
|
||||
) ELSE IF /I "%restore%" == "2" (
|
||||
echo Starting the InvokeAI browser-based UI..
|
||||
python scripts\invoke.py --web %*
|
||||
) ELSE IF /I "%restore%" == "3" (
|
||||
echo Developer Console
|
||||
call where python
|
||||
call python --version
|
||||
|
||||
cmd /k
|
||||
) ELSE (
|
||||
echo Invalid selection
|
||||
pause
|
||||
exit /b
|
||||
)
|
||||
endlocal
|
@ -1,16 +0,0 @@
|
||||
InvokeAI
|
||||
|
||||
Project homepage: https://github.com/invoke-ai/InvokeAI
|
||||
|
||||
Installation on Windows:
|
||||
You may need to enable Windows Long Paths to install InvokeAI. If you're not
|
||||
sure what this is, you almost certainly need to do this. Simply double-click the
|
||||
"WinLongPathsEnabled.reg" file located in this directory, and approve the Windows
|
||||
warnings. Note that you will need to have admin privileges in order to do this.
|
||||
|
||||
Then double-click the 'install.bat' file (while keeping it inside the invokeAI folder).
|
||||
|
||||
Installation on Linux and Mac:
|
||||
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).
|
||||
|
||||
After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh' file (on Linux/Mac) to start InvokeAI.
|
@ -1,19 +0,0 @@
|
||||
@echo off
|
||||
|
||||
set INSTALL_ENV_DIR=%cd%\installer_files\env
|
||||
set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%
|
||||
|
||||
@rem update the repo
|
||||
if exist ".git" (
|
||||
call git pull
|
||||
)
|
||||
|
||||
|
||||
conda env update
|
||||
conda activate invokeai
|
||||
python scripts/preload_models.py
|
||||
|
||||
echo "Press any key to continue"
|
||||
pause
|
||||
exit 0
|
||||
|
@ -1,26 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
|
||||
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
|
||||
if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi
|
||||
|
||||
# update the repo
|
||||
if [ -e ".git" ]; then
|
||||
git pull
|
||||
fi
|
||||
|
||||
CONDA_BASEPATH=$(conda info --base)
|
||||
source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
|
||||
|
||||
conda activate invokeai
|
||||
|
||||
OS_NAME=$(uname -s)
|
||||
case "${OS_NAME}" in
|
||||
Linux*) conda env update;;
|
||||
Darwin*) conda env update -f environment-mac.yml;;
|
||||
*) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
|
||||
esac
|
||||
|
||||
python scripts/preload_models.py
|
||||