sync dev to main
.github/CODEOWNERS

@@ -2,3 +2,4 @@ ldm/invoke/pngwriter.py @CapableWeb
 ldm/invoke/server_legacy.py @CapableWeb
 scripts/legacy_api.py @CapableWeb
 tests/legacy_tests.sh @CapableWeb
+installer/ @tildebyte
.github/workflows/build-container.yml

@@ -17,9 +17,9 @@ jobs:
          - aarch64
        include:
          - arch: x86_64
-           conda-env-file: environment.yml
+           conda-env-file: environment-lin-cuda.yml
          - arch: aarch64
-           conda-env-file: environment-linux-aarch64.yml
+           conda-env-file: environment-lin-aarch64.yml
    runs-on: ubuntu-latest
    name: ${{ matrix.arch }}
    steps:
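The per-arch `conda-env-file` value set in the matrix above is presumably consumed by a later step through the `matrix` context; a hypothetical step (not part of this diff, but mirroring the `create environment.yml` step added to `test-invoke-conda.yml` below) might look like:

```yaml
# sketch only: copies the arch-specific environment file selected above
- name: create environment.yml
  run: cp environments-and-requirements/${{ matrix.conda-env-file }} environment.yml
```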
.github/workflows/test-invoke-conda.yml

@@ -22,7 +22,7 @@ jobs:
          - macOS-12
        include:
          - os: ubuntu-latest
-           environment-file: environment.yml
+           environment-file: environment-lin-cuda.yml
            default-shell: bash -l {0}
          - os: macOS-12
            environment-file: environment-mac.yml

@@ -48,6 +48,9 @@ jobs:
      - name: create models.yaml from example
        run: cp configs/models.yaml.example configs/models.yaml

+     - name: create environment.yml
+       run: cp environments-and-requirements/${{ matrix.environment-file }} environment.yml
+
      - name: Use cached conda packages
        id: use-cached-conda-packages
        uses: actions/cache@v3

@@ -60,7 +63,7 @@ jobs:
        uses: conda-incubator/setup-miniconda@v2
        with:
          activate-environment: ${{ env.CONDA_ENV_NAME }}
-         environment-file: ${{ matrix.environment-file }}
+         environment-file: environment.yml
          miniconda-version: latest

      - name: set test prompt to main branch validation
.gitignore

@@ -194,6 +194,10 @@ checkpoints

 # Let the frontend manage its own gitignore
 !frontend/*
+frontend/apt-get
+frontend/dist
+frontend/sudo
+frontend/update

 # Scratch folder
 .scratch/

@@ -201,6 +205,7 @@ checkpoints
+gfpgan/
 models/ldm/stable-diffusion-v1/*.sha256


 # GFPGAN model files
 gfpgan/

@@ -209,6 +214,24 @@ configs/models.yaml

 # weights (will be created by installer)
 models/ldm/stable-diffusion-v1/*.ckpt
+models/clipseg
+models/gfpgan
+
+# ignore initfile
+invokeai.init
+.invokeai
+
+# ignore environment.yml and requirements.txt
+# these are links to the real files in environments-and-requirements
+environment.yml
+requirements.txt
+
+# source installer files
+source_installer/*zip
+source_installer/invokeAI
+install.bat
+install.sh
+update.bat
+update.sh
+
+# this may be present if the user created a venv
+invokeai
[17 binary image files (docs assets) changed; before/after file sizes identical, ranging 6.1 KiB–553 KiB]
@@ -43,33 +43,42 @@ RUN apt-get update \
 ARG invokeai_git=invoke-ai/InvokeAI
 ARG invokeai_branch=main
 ARG project_name=invokeai
-RUN git clone -b ${invokeai_branch} https://github.com/${invokeai_git}.git /${project_name} \
-  && cp /${project_name}/configs/models.yaml.example /${project_name}/configs/models.yaml \
-  && ln -s /data/models/v1-5-pruned-emaonly.ckpt /${project_name}/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt \
-  && ln -s /data/outputs/ /${project_name}/outputs
+ARG conda_env_file=environment-lin-cuda.yml
+RUN git clone -b ${invokeai_branch} https://github.com/${invokeai_git}.git "/${project_name}" \
+  && cp \
+    "/${project_name}/configs/models.yaml.example" \
+    "/${project_name}/configs/models.yaml" \
+  && ln -sf \
+    "/${project_name}/environments-and-requirements/${conda_env_file}" \
+    "/${project_name}/environment.yml" \
+  && ln -sf \
+    /data/models/v1-5-pruned-emaonly.ckpt \
+    "/${project_name}/models/ldm/stable-diffusion-v1/v1-5-pruned-emaonly.ckpt" \
+  && ln -sf \
+    /data/outputs/ \
+    "/${project_name}/outputs"

 # set workdir
-WORKDIR /${project_name}
+WORKDIR "/${project_name}"

 # install conda env and preload models
 ARG conda_prefix=/opt/conda
-ARG conda_env_file=environment.yml
-COPY --from=get_miniconda ${conda_prefix} ${conda_prefix}
-RUN source ${conda_prefix}/etc/profile.d/conda.sh \
+COPY --from=get_miniconda "${conda_prefix}" "${conda_prefix}"
+RUN source "${conda_prefix}/etc/profile.d/conda.sh" \
  && conda init bash \
  && source ~/.bashrc \
  && conda env create \
-   --name ${project_name} \
    --file ${conda_env_file} \
+   --name "${project_name}" \
  && rm -Rf ~/.cache \
  && conda clean -afy \
- && echo "conda activate ${project_name}" >> ~/.bashrc \
- && conda activate ${project_name} \
+ && echo "conda activate ${project_name}" >> ~/.bashrc
+
+RUN source ~/.bashrc \
  && python scripts/preload_models.py \
  --no-interactive

 # Copy entrypoint and set env
-ENV CONDA_PREFIX=${conda_prefix}
-ENV PROJECT_NAME=${project_name}
+ENV CONDA_PREFIX="${conda_prefix}"
+ENV PROJECT_NAME="${project_name}"
 COPY docker-build/entrypoint.sh /
 ENTRYPOINT [ "/entrypoint.sh" ]
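A hedged sketch of a build command exercising the `ARG`s declared in this Dockerfile; the `-f docker-build/Dockerfile` path is an assumption based on the `docker-build/entrypoint.sh` reference above, and the values shown are just the defaults from the `ARG` lines:

```bash
# sketch only; every --build-arg mirrors an ARG name in the hunk above
docker build \
  --build-arg invokeai_git=invoke-ai/InvokeAI \
  --build-arg invokeai_branch=main \
  --build-arg conda_env_file=environment-lin-cuda.yml \
  -t invokeai -f docker-build/Dockerfile .
```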
@@ -8,7 +8,7 @@ source ./docker-build/env.sh || echo "please run from repository root" || exit 1

 invokeai_conda_version=${INVOKEAI_CONDA_VERSION:-py39_4.12.0-${platform/\//-}}
 invokeai_conda_prefix=${INVOKEAI_CONDA_PREFIX:-\/opt\/conda}
-invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment.yml}
+invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment-lin-cuda.yml}
 invokeai_git=${INVOKEAI_GIT:-invoke-ai/InvokeAI}
 invokeai_branch=${INVOKEAI_BRANCH:-main}
 huggingface_token=${HUGGINGFACE_TOKEN?}
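Because each assignment above uses the `${VAR:-default}` idiom, the defaults can be overridden from the calling environment, and the `${HUGGINGFACE_TOKEN?}` expansion makes the token mandatory. A minimal sketch (the token value is a placeholder):

```bash
# fails fast if HUGGINGFACE_TOKEN is unset, per the ${VAR?} expansion above
HUGGINGFACE_TOKEN="<your-token>" \
INVOKEAI_CONDA_ENV_FILE=environment-lin-aarch64.yml \
./docker-build/build.sh
```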
@@ -19,13 +19,13 @@ tree on a hill with a river, nature photograph, national geographic -I./test-pic
 This will take the original image shown here:

 <figure markdown>
-![](https://user-images.githubusercontent.com/50542132/193946000-c42a96d8-5a74-4f8a-b4c3-5213e6cadcce.png)
+![original-image](https://user-images.githubusercontent.com/50542132/193946000-c42a96d8-5a74-4f8a-b4c3-5213e6cadcce.png){ width=320 }
 </figure>

 and generate a new image based on it as shown here:

 <figure markdown>
-![](https://user-images.githubusercontent.com/111189/194135515-53d4c060-e994-4016-8121-7c685e281ac9.png)
+![generated-image](https://user-images.githubusercontent.com/111189/194135515-53d4c060-e994-4016-8121-7c685e281ac9.png){ width=320 }
 </figure>

 The `--init_img` (`-I`) option gives the path to the seed picture. `--strength`

@@ -45,15 +45,16 @@ Note that the prompt makes a big difference. For example, this slight variation
 on the prompt produces a very different image:

 <figure markdown>
-![](https://user-images.githubusercontent.com/111189/194135220-16b62181-b60c-4248-8989-4834a8fd7fbd.png)
+![](https://user-images.githubusercontent.com/111189/194135220-16b62181-b60c-4248-8989-4834a8fd7fbd.png){ width=320 }
 <caption markdown>photograph of a tree on a hill with a river</caption>
 </figure>

 !!! tip

-    When designing prompts, think about how the images scraped from the internet were captioned. Very few photographs will
-    be labeled "photograph" or "photorealistic." They will, however, be captioned with the publication, photographer, camera
-    model, or film settings.
+    When designing prompts, think about how the images scraped from the internet were
+    captioned. Very few photographs will be labeled "photograph" or "photorealistic."
+    They will, however, be captioned with the publication, photographer, camera model,
+    or film settings.

 If the initial image contains transparent regions, then Stable Diffusion will
 only draw within the transparent regions, a process called

@@ -61,17 +62,17 @@ only draw within the transparent regions, a process called
 However, for this to work correctly, the color information underneath the
 transparent needs to be preserved, not erased.

-!!! warning
+!!! warning "**IMPORTANT ISSUE** "

-    **IMPORTANT ISSUE** `img2img` does not work properly on initial images smaller
-    than 512x512. Please scale your image to at least 512x512 before using it.
-    Larger images are not a problem, but may run out of VRAM on your GPU card. To
-    fix this, use the --fit option, which downscales the initial image to fit within
-    the box specified by width x height:
+    `img2img` does not work properly on initial images smaller
+    than 512x512. Please scale your image to at least 512x512 before using it.
+    Larger images are not a problem, but may run out of VRAM on your GPU card. To
+    fix this, use the --fit option, which downscales the initial image to fit within
+    the box specified by width x height:

-```
-tree on a hill with a river, national geographic -I./test-pictures/big-sketch.png -H512 -W512 --fit
-```
+    ```
+    tree on a hill with a river, national geographic -I./test-pictures/big-sketch.png -H512 -W512 --fit
+    ```

 ## How does it actually work, though?

@@ -87,7 +88,7 @@ from a prompt. If the step count is 10, then the "latent space" (Stable
 Diffusion's internal representation of the image) for the prompt "fire" with
 seed `1592514025` develops something like this:

-```commandline
+```bash
 invoke> "fire" -s10 -W384 -H384 -S1592514025
 ```

@@ -133,9 +134,9 @@ Notice how much more fuzzy the starting image is for strength `0.7` compared to

 |                             | strength = 0.7                                                | strength = 0.4                                                |
 | --------------------------- | ------------------------------------------------------------- | ------------------------------------------------------------- |
-| initial image that SD sees  | ![](../assets/img2img/000032.step-0.png)                      | ![](../assets/img2img/000030.step-0.png)                      |
+| initial image that SD sees  | ![step-0](../assets/img2img/000032.step-0.png)                | ![step-0](../assets/img2img/000030.step-0.png)                |
 | steps argument to `invoke>` | `-S10`                                                        | `-S10`                                                        |
-| steps actually taken        | 7                                                             | 4                                                             |
+| steps actually taken        | `7`                                                           | `4`                                                           |
 | latent space at each step   | ![gravity32](../assets/img2img/000032.steps.gravity.png)      | ![gravity30](../assets/img2img/000030.steps.gravity.png)      |
 | output                      | ![000032.1592514025](../assets/img2img/000032.1592514025.png) | ![000030.1592514025](../assets/img2img/000030.1592514025.png) |

@@ -150,7 +151,7 @@ If you want to try this out yourself, all of these are using a seed of
 `1592514025` with a width/height of `384`, step count `10`, the default sampler
 (`k_lms`), and the single-word prompt `"fire"`:

-```commandline
+```bash
 invoke> "fire" -s10 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png --strength 0.7
 ```

@@ -170,7 +171,7 @@ give each generation 20 steps.
 Here's strength `0.4` (note step count `50`, which is `20 ÷ 0.4` to make sure SD
 does `20` steps from my image):

-```commandline
+```bash
 invoke> "fire" -s50 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.4
 ```
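The strength/step arithmetic these docs describe (`-s10` at strength `0.7` yields 7 actual steps; `20 ÷ 0.4 = 50`) can be captured in a small illustrative sketch, not part of the docs themselves:

```python
def s_value(desired_steps: int, strength: float) -> int:
    # img2img performs roughly strength * s denoising steps, so to get
    # `desired_steps` real steps, pass desired_steps / strength as -s
    return round(desired_steps / strength)

print(s_value(20, 0.4))  # 50, matching the -s50 ... -f 0.4 example above
print(s_value(20, 0.7))  # ~29 for the same 20 real steps at strength 0.7
```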
@@ -126,7 +126,7 @@ A number of caveats:
    the border.

 4. When using the `inpaint-1.5` model, you may notice subtle changes to the area
-   within the original image. This is because the model performs an
+   outside the masked region. This is because the model performs an
    encoding/decoding on the image as a whole. This does not occur with the
    standard model.
@@ -2,7 +2,7 @@
 title: WebUI Hotkey List
 ---

-# **WebUI Hotkey List**
+# :material-keyboard: **WebUI Hotkey List**

 ## General

@@ -19,7 +19,7 @@ title: WebUI Hotkey List
 | ++ctrl+enter++ | Start processing  |
 | ++shift+x++    | cancel Processing |
 | ++shift+d++    | Toggle Dark Mode  |
-| `              | Toggle console    |
+| ++"`"++        | Toggle console    |

 ## Tabs

@@ -48,10 +48,10 @@ title: WebUI Hotkey List

 | Setting     | Hotkey                |
 | ----------- | --------------------- |
-| [           | Decrease brush size   |
-| ]           | Increase brush size   |
-| alt + [     | Decrease mask opacity |
-| alt + ]     | Increase mask opacity |
+| ++"["++     | Decrease brush size   |
+| ++"]"++     | Increase brush size   |
+| ++alt+"["++ | Decrease mask opacity |
+| ++alt+"]"++ | Increase mask opacity |
 | ++b++       | Select brush          |
 | ++e++       | Select eraser         |
 | ++ctrl+z++  | Undo brush stroke     |
@@ -94,6 +94,7 @@ installation instructions below.
 You wil need one of the following:

 - :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
+- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux only)
 - :fontawesome-brands-apple: An Apple computer with an M1 chip.

 ### :fontawesome-solid-memory: Memory
@@ -4,26 +4,30 @@ title: Docker

 # :fontawesome-brands-docker: Docker

-## Before you begin
+!!! warning "For end users"

-- For end users: Install Stable Diffusion locally using the instructions for
-  your OS.
-- For developers: For container-related development tasks or for enabling easy
-  deployment to other environments (on-premises or cloud), follow these
-  instructions. For general use, install locally to leverage your machine's GPU.
+    We highly recommend to Install InvokeAI locally using [these instructions](index.md)"
+
+!!! tip "For developers"
+
+    For container-related development tasks or for enabling easy
+    deployment to other environments (on-premises or cloud), follow these
+    instructions.
+
+    For general use, install locally to leverage your machine's GPU.

 ## Why containers?

-They provide a flexible, reliable way to build and deploy Stable Diffusion.
-You'll also use a Docker volume to store the largest model files and image
-outputs as a first step in decoupling storage and compute. Future enhancements
-can do this for other assets. See [Processes](https://12factor.net/processes)
-under the Twelve-Factor App methodology for details on why running applications
-in such a stateless fashion is important.
+They provide a flexible, reliable way to build and deploy InvokeAI. You'll also
+use a Docker volume to store the largest model files and image outputs as a
+first step in decoupling storage and compute. Future enhancements can do this
+for other assets. See [Processes](https://12factor.net/processes) under the
+Twelve-Factor App methodology for details on why running applications in such a
+stateless fashion is important.

 You can specify the target platform when building the image and running the
-container. You'll also need to specify the Stable Diffusion requirements file
-that matches the container's OS and the architecture it will run on.
+container. You'll also need to specify the InvokeAI requirements file that
+matches the container's OS and the architecture it will run on.

 Developers on Apple silicon (M1/M2): You
 [can't access your GPU cores from Docker containers](https://github.com/pytorch/pytorch/issues/81224)

@@ -38,16 +42,19 @@ another environment with NVIDIA GPUs on-premises or in the cloud.

 #### Install [Docker](https://github.com/santisbon/guides#docker)

-On the Docker Desktop app, go to Preferences, Resources, Advanced. Increase the
-CPUs and Memory to avoid this
+On the [Docker Desktop app](https://docs.docker.com/get-docker/), go to
+Preferences, Resources, Advanced. Increase the CPUs and Memory to avoid this
 [Issue](https://github.com/invoke-ai/InvokeAI/issues/342). You may need to
 increase Swap and Disk image size too.

 #### Get a Huggingface-Token

-Go to [Hugging Face](https://huggingface.co/settings/tokens), create a token and
-temporary place it somewhere like a open texteditor window (but dont save it!,
-only keep it open, we need it in the next step)
+Besides the Docker Agent you will need an Account on
+[huggingface.co](https://huggingface.co/join).
+
+After you succesfully registered your account, go to
+[huggingface.co/settings/tokens](https://huggingface.co/settings/tokens), create
+a token and copy it, since you will need in for the next step.

 ### Setup

@@ -65,13 +72,14 @@ created in the last step.

 Some Suggestions of variables you may want to change besides the Token:

-| Environment-Variable                                                 | Description                                                               |
-| ------------------------------------------------------------------- | ------------------------------------------------------------------------ |
-| `HUGGINGFACE_TOKEN="hg_aewirhghlawrgkjbarug2"`                       | This is the only required variable, without you can't get the checkpoint |
-| `ARCH=aarch64`                                                       | if you are using a ARM based CPU                                          |
-| `INVOKEAI_TAG=yourname/invokeai:latest`                              | the Container Repository / Tag which will be used                         |
-| `INVOKEAI_CONDA_ENV_FILE=environment-linux-aarch64.yml`              | since environment.yml wouldn't work with aarch                            |
-| `INVOKEAI_GIT="-b branchname https://github.com/username/reponame"`  | if you want to use your own fork                                          |
+| Environment-Variable      | Default value                 | Description                                                                   |
+| ------------------------- | ----------------------------- | ----------------------------------------------------------------------------- |
+| `HUGGINGFACE_TOKEN`       | No default, but **required**! | This is the only **required** variable, without you can't get the checkpoint |
+| `ARCH`                    | x86_64                        | if you are using a ARM based CPU                                              |
+| `INVOKEAI_TAG`            | invokeai-x86_64               | the Container Repository / Tag which will be used                             |
+| `INVOKEAI_CONDA_ENV_FILE` | environment-lin-cuda.yml      | since environment.yml wouldn't work with aarch                                |
+| `INVOKEAI_GIT`            | invoke-ai/InvokeAI            | the repository to use                                                         |
+| `INVOKEAI_BRANCH`         | main                          | the branch to checkout                                                        |

 #### Build the Image

@@ -79,25 +87,41 @@ I provided a build script, which is located in `docker-build/build.sh` but still
 needs to be executed from the Repository root.

 ```bash
-docker-build/build.sh
+./docker-build/build.sh
 ```

 The build Script not only builds the container, but also creates the docker
-volume if not existing yet, or if empty it will just download the models. When
-it is done you can run the container via the run script
+volume if not existing yet, or if empty it will just download the models.
+
+#### Run the Container
+
+After the build process is done, you can run the container via the provided
+`docker-build/run.sh` script

 ```bash
-docker-build/run.sh
+./docker-build/run.sh
 ```

-When used without arguments, the container will start the website and provide
+When used without arguments, the container will start the webserver and provide
 you the link to open it. But if you want to use some other parameters you can
 also do so.

+!!! example ""
+
+    ```bash
+    ./docker-build/run.sh --from_file tests/validate_pr_prompt.txt
+    ```
+
+    The output folder is located on the volume which is also used to store the model.
+
+Find out more about available CLI-Parameters at [features/CLI.md](../features/CLI.md/#arguments)
+
+---
+
 !!! warning "Deprecated"

-    From here on it is the rest of the previous Docker-Docs, which will still
-    provide usefull informations for one or the other.
+    From here on you will find the the previous Docker-Docs, which will still
+    provide some usefull informations.

 ## Usage (time to have fun)
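Pulling the environment-variable table and scripts above together, a hypothetical aarch64 build-and-run session might look like this (all values are placeholders or the defaults listed in the table):

```bash
export HUGGINGFACE_TOKEN="<your-token>"                     # required, no default
export ARCH=aarch64                                         # default: x86_64
export INVOKEAI_CONDA_ENV_FILE=environment-lin-aarch64.yml  # aarch64 environment
./docker-build/build.sh
./docker-build/run.sh
```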
docs/installation/INSTALL_INVOKE.md (new file, +64 lines)

---
title: InvokeAI Installer
---

The InvokeAI installer is a shell script that will install InvokeAI onto a stock
computer running recent versions of Linux, MacOSX or Windows. It will leave you
with a version that runs a stable version of InvokeAI. When a new version of
InvokeAI is released, you will download and reinstall the new version.

If you wish to tinker with unreleased versions of InvokeAI that introduce
potentially unstable new features, you should consider using the
[source installer](INSTALL_SOURCE.md) or one of the
[manual install](INSTALL_MANUAL.md) methods.

**Important Caveats**

- This script does not support AMD GPUs. For Linux AMD support,
  please use the manual or source code installer methods.

- This script has difficulty on some Macintosh machines
  that have previously been used for Python development due to
  conflicting development tools versions. Mac developers may wish
  to try the source code installer or one of the manual methods instead.

!!! todo

    Before you begin, make sure that you meet the
    [hardware requirements](/#hardware-requirements) and have the
    appropriate GPU drivers installed. In particular, if you are a Linux user with
    an AMD GPU installed, you may need to install the
    [ROCm-driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

Installation requires roughly 18G of free disk space to load the libraries and
recommended model weights files.

## Steps to Install

1. Download the
   [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest) of
   InvokeAI's installer for your platform

2. Place the downloaded package someplace where you have plenty of HDD space,
   and have full permissions (i.e. `~/` on Lin/Mac; your home folder on Windows)

3. Extract the 'InvokeAI' folder from the downloaded package

4. Open the extracted 'InvokeAI' folder

5. Double-click 'install.bat' (Windows), or 'install.sh' (Lin/Mac) (or run from
   a terminal)

6. Follow the prompts

7. After installation, please run the 'invoke.bat' file (on Windows) or
   'invoke.sh' file (on Linux/Mac) to start InvokeAI.

## Troubleshooting

If you run into problems during or after installation, the InvokeAI team is
available to help you. Either create an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
make a request for help on the "bugs-and-support" channel of our
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
organization, but typically somebody will be available to help you within 24
hours, and often much sooner.
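For the terminal route mentioned in step 5, a minimal sketch on Linux/Mac, assuming the package was extracted into the current directory:

```bash
cd InvokeAI
./install.sh    # step 5 (Windows: install.bat)
./invoke.sh     # step 7, after installation completes (Windows: invoke.bat)
```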
docs/installation/INSTALL_JUPYTER.md (new file, +27 lines)

---
title: Running InvokeAI on Google Colab using a Jupyter Notebook
---

# THIS NEEDS TO BE FLESHED OUT

## Introduction

We have a [Jupyter
notebook](https://github.com/invoke-ai/InvokeAI/blob/main/notebooks/Stable-Diffusion-local-Windows.ipynb)
with cell-by-cell installation steps. It will download the code in
this repo as one of the steps, so instead of cloning this repo, simply
download the notebook from the link above and load it up in VSCode
(with the appropriate extensions installed)/Jupyter/JupyterLab and
start running the cells one-by-one.

!!! Note "you will need NVIDIA drivers, Python 3.10, and Git installed beforehand"

## Walkthrough

## Updating to newer versions

### Updating the stable version

### Updating to the development version

## Troubleshooting
docs/installation/INSTALL_MANUAL.md (new file, +416 lines)

---
title: Manual Installation
---

<figure markdown>
# :fontawesome-brands-linux: Linux | :fontawesome-brands-apple: macOS | :fontawesome-brands-windows: Windows
</figure>

!!! warning "This is for advanced Users"

    who are already experienced with using conda or pip

## Introduction

You have two choices for manual installation, the [first one](#Conda_method)
based on the Anaconda3 package manager (`conda`), and
[a second one](#PIP_method) which uses basic Python virtual environment (`venv`)
commands and the PIP package manager. Both methods require you to enter commands
on the terminal, also known as the "console".

On Windows systems you are encouraged to install and use the
[Powershell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
which provides compatibility with Linux and Mac shells and nice features such as
command-line completion.

### Conda method

1. Check that your system meets the
   [hardware requirements](index.md#Hardware_Requirements) and has the
   appropriate GPU drivers installed. In particular, if you are a Linux user
   with an AMD GPU installed, you may need to install the
   [ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

   InvokeAI does not yet support Windows machines with AMD GPUs due to the lack
   of ROCm driver support on this platform.

   To confirm that the appropriate drivers are installed, run `nvidia-smi` on
   NVIDIA/CUDA systems, and `rocm-smi` on AMD systems. These should return
   information about the installed video card.

   Macintosh users with MPS acceleration, or anybody with a CPU-only system,
   can skip this step.

2. You will need to install Anaconda3 and Git if they are not already
   available. Use your operating system's preferred package manager, or
   download the installers manually. You can find them here:

   - [Anaconda3](https://www.anaconda.com/)
   - [git](https://git-scm.com/downloads)

3. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from
   GitHub:

   ```bash
   git clone https://github.com/invoke-ai/InvokeAI.git
   ```

   This will create an InvokeAI folder where you will follow the rest of the
   steps.

4. Enter the newly-created InvokeAI folder:

   ```bash
   cd InvokeAI
   ```

   From this step forward make sure that you are working in the InvokeAI
   directory!

5. Select the appropriate environment file:

   We have created a series of environment files suited for different operating
   systems and GPU hardware. They are located in the
   `environments-and-requirements` directory:

   <figure markdown>

   | filename                 | OS                              |
   | :----------------------: | :-----------------------------: |
   | environment-lin-amd.yml  | Linux with an AMD (ROCm) GPU    |
   | environment-lin-cuda.yml | Linux with an NVIDIA CUDA GPU   |
   | environment-mac.yml      | Macintosh                       |
   | environment-win-cuda.yml | Windows with an NVIDIA CUDA GPU |

   </figure>

   Choose the appropriate environment file for your system and link or copy it
   to `environment.yml` in InvokeAI's top-level directory. To do so, run the
   following command from the repository root:

   !!! Example ""

       === "Macintosh and Linux"

           !!! todo "Replace `xxx` and `yyy` with the appropriate OS and GPU codes as seen in the table above"

           ```bash
           ln -sf environments-and-requirements/environment-xxx-yyy.yml environment.yml
           ```

           When this is done, confirm that a file `environment.yml` has been linked in
           the InvokeAI root directory and that it points to the correct file in
           `environments-and-requirements`.

           ```bash
           ls -la
           ```

       === "Windows"

           !!! todo "Since it requires admin privileges to create links, we will use the copy command to create your `environment.yml`"

           ```cmd
           copy environments-and-requirements\environment-win-cuda.yml environment.yml
           ```

           Afterwards verify that the file `environment.yml` has been created, either via the
           explorer or by using the command `dir` from the terminal

           ```cmd
           dir
           ```

   !!! warning "Do not try to run conda directly on the subdirectory environments file. This won't work. Instead, copy or link it to the top-level directory as shown."

6. Create the conda environment:

   ```bash
   conda env update
   ```

   This will create a new environment named `invokeai` and install all InvokeAI
   dependencies into it. If something goes wrong you should take a look at
   [troubleshooting](#troubleshooting).

7. Activate the `invokeai` environment:

   In order to use the newly created environment you will first need to
   activate it:

   ```bash
   conda activate invokeai
   ```

   Your command-line prompt should change to indicate that `invokeai` is active
   by prepending `(invokeai)`.

8. Pre-load the model weights files:

   !!! tip

       If you have already downloaded the weights file(s) for another Stable
       Diffusion distribution, you may skip this step (by selecting "skip" when
       prompted) and configure InvokeAI to use the previously-downloaded files. The
       process for this is described [here](INSTALLING_MODELS.md).

   ```bash
   python scripts/preload_models.py
   ```

   The script `preload_models.py` will interactively guide you through the
   process of downloading and installing the weights files needed for InvokeAI.
   Note that the main Stable Diffusion weights file is protected by a license
   agreement that you have to agree to. The script will list the steps you need
   to take to create an account on the site that hosts the weights files,
   accept the agreement, and provide an access token that allows InvokeAI to
   legally download and install the weights files.

   If you get an error message about a module not being installed, check that
   the `invokeai` environment is active and if not, repeat step 7.

9. Run the command-line or the web interface:

   !!! example ""

       !!! warning "Make sure that the conda environment is activated, which should place `(invokeai)` in front of your prompt!"

       === "CLI"

           ```bash
           python scripts/invoke.py
           ```

       === "local Webserver"

           ```bash
           python scripts/invoke.py --web
           ```

       === "Public Webserver"

           ```bash
           python scripts/invoke.py --web --host 0.0.0.0
           ```

   If you choose to run the web interface, point your browser at
   http://localhost:9090 in order to load the GUI.

10. Render away!

    Browse the [features](../features/CLI.md) section to learn about all the things you
    can do with InvokeAI.

    Note that some GPUs are slow to warm up. In particular, when using an AMD
    card with the ROCm driver, you may have to wait for over a minute the first
    time you try to generate an image. Fortunately, after the warm-up period
    rendering will be fast.

11. Subsequently, to relaunch the script, be sure to run "conda activate
    invokeai", enter the `InvokeAI` directory, and then launch the invoke
    script. If you forget to activate the 'invokeai' environment, the script
    will fail with multiple `ModuleNotFound` errors.

## Updating to newer versions of the script

This distribution is changing rapidly. If you used the `git clone` method
(step 3) to download the InvokeAI directory, then to update to the latest and
greatest version, launch the Anaconda window, enter `InvokeAI` and type:

```bash
git pull
conda env update
python scripts/preload_models.py --no-interactive #optional
```

This will bring your local copy into sync with the remote one. The last step may
be needed to take advantage of new features or released models. The
`--no-interactive` flag will prevent the script from prompting you to download
the big Stable Diffusion weights files.

## pip Install

To install InvokeAI with only the PIP package manager, please follow these
steps:

1. Make sure you are using Python 3.9 or higher. The rest of the install
   procedure depends on this:

   ```bash
   python -V
   ```

2. Install the `virtualenv` tool if you don't have it already:

   ```bash
   pip install virtualenv
   ```

3. From within the InvokeAI top-level directory, create and activate a virtual
   environment named `invokeai`:

   ```bash
   virtualenv invokeai
   source invokeai/bin/activate
   ```

4. Pick the correct `requirements*.txt` file for your hardware and operating
   system.

   We have created a series of environment files suited for different operating
   systems and GPU hardware. They are located in the
   `environments-and-requirements` directory:

   <figure markdown>

   | filename                            | OS                                                               |
   | :---------------------------------: | :--------------------------------------------------------------: |
   | requirements-lin-amd.txt            | Linux with an AMD (ROCm) GPU                                     |
   | requirements-lin-arm64.txt          | Linux running on arm64 systems                                   |
   | requirements-lin-cuda.txt           | Linux with an NVIDIA (CUDA) GPU                                  |
   | requirements-mac-mps-cpu.txt        | Macintoshes with MPS acceleration                                |
   | requirements-lin-win-colab-cuda.txt | Windows with an NVIDIA (CUDA) GPU<br>(supports Google Colab too) |

   </figure>

   Select the appropriate requirements file, and make a link to it from
   `requirements.txt` in the top-level InvokeAI directory. The command to do
   this from the top-level directory is:

   !!! example ""

       === "Macintosh and Linux"

           !!! info "Replace `xxx` and `yyy` with the appropriate OS and GPU codes."

           ```bash
           ln -sf environments-and-requirements/requirements-xxx-yyy.txt requirements.txt
           ```

       === "Windows"

           !!! info "on Windows, admin privileges are required to make links, so we use the copy command instead"

           ```cmd
           copy environments-and-requirements\requirements-lin-win-colab-cuda.txt requirements.txt
           ```

   !!! warning

       Please do not link or copy `environments-and-requirements/requirements-base.txt`.
       This is a base requirements file that does not have the platform-specific
       libraries. Also, be sure to link or copy the platform-specific file to
       a top-level file named `requirements.txt` as shown here. Running pip on
       a requirements file in a subdirectory will not work as expected.

   When this is done, confirm that a file named `requirements.txt` has been
   created in the InvokeAI root directory and that it points to the correct
   file in `environments-and-requirements`.

5. Run PIP:

   Be sure that the `invokeai` environment is active before doing this:

   ```bash
   pip install --prefer-binary -r requirements.txt
   ```

---

## Troubleshooting

Here are some common issues and their suggested solutions.

### Conda

#### Conda fails before completing `conda update`

The usual source of these errors is a package incompatibility. While we have
tried to minimize these, over time packages get updated and sometimes introduce
incompatibilities.

We suggest that you search
[Issues](https://github.com/invoke-ai/InvokeAI/issues) or the "bugs-and-support"
channel of the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).

You may also try to install the broken packages manually using PIP. To do this,
activate the `invokeai` environment, and run `pip install` with the name and
version of the package that is causing the incompatibility. For example:

```bash
pip install test-tube==0.7.5
```

You can keep doing this until all requirements are satisfied and the `invoke.py`
script runs without errors. Please report to
[Issues](https://github.com/invoke-ai/InvokeAI/issues) what you were able to do
to work around the problem so that others can benefit from your investigation.

#### `preload_models.py` or `invoke.py` crashes at an early stage

This is usually due to an incomplete or corrupted Conda install. Make sure you
have linked to the correct environment file and run `conda update` again.

If the problem persists, a more extreme measure is to clear Conda's caches and
remove the `invokeai` environment:

```bash
conda deactivate
conda env remove -n invokeai
conda clean -a
conda update
```

This removes all cached library files, including ones that may have been
corrupted somehow. (This is not supposed to happen, but does anyway).

#### `invoke.py` crashes at a later stage

If the CLI or web site had been working ok, but something unexpected happens
later on during the session, you've encountered a code bug that is probably
unrelated to an install issue. Please search
[Issues](https://github.com/invoke-ai/InvokeAI/issues), file a bug report, or
ask for help on [Discord](https://discord.gg/ZmtBAhwWhy).

#### My renders are running very slowly

You may have installed the wrong torch (machine learning) package, and the
system is running on CPU rather than the GPU. To check, look at the log messages
that appear when `invoke.py` is first starting up. One of the earlier lines
should say `Using device type cuda`. On AMD systems, it will also say "cuda",
and on Macintoshes, it should say "mps". If instead the message says it is
running on "cpu", then you may need to install the correct torch library.

You may be able to fix this by installing a different torch library. Here are
the magic incantations for Conda and PIP.

!!! todo "For CUDA systems"

    - conda

      ```bash
      conda install pytorch torchvision torchaudio pytorch-cuda=11.6 -c pytorch -c nvidia
      ```

    - pip

      ```bash
      pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
      ```

!!! todo "For AMD systems"

    - conda

      ```bash
      conda activate invokeai
      pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
      ```

    - pip

      ```bash
      pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
      ```

More information and troubleshooting tips can be found at https://pytorch.org.
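As a quick sanity check for the `Using device type ...` startup message described in the slow-render section above, a minimal sketch requiring only the `torch` package:

```python
import torch

# mirrors the "Using device type ..." log line invoke.py prints at startup
if torch.cuda.is_available():
    print("Using device type cuda")  # NVIDIA; ROCm builds also report cuda
elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
    print("Using device type mps")   # Apple silicon
else:
    print("Using device type cpu")   # likely the wrong torch build for your GPU
```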
docs/installation/INSTALL_PCP.md (new file, +17 lines)

---
title: Installing InvokeAI with the Pre-Compiled PIP Installer
---

# THIS NEEDS TO BE FLESHED OUT

## Introduction

## Walkthrough

## Updating to newer versions

### Updating the stable version

### Updating to the development version

## Troubleshooting
docs/installation/INSTALL_SOURCE.md (new file, +156 lines)

---
title: Source Installer
---

# The InvokeAI Source Installer

## Introduction

The source installer is a shell script that attempts to automate every step
needed to install and run InvokeAI on a stock computer running recent versions
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
version of InvokeAI with the option to upgrade to experimental versions later.
It is not as foolproof as the [InvokeAI installer](INSTALL_INVOKE.md).

Before you begin, make sure that you meet the
[hardware requirements](index.md#Hardware_Requirements) and have the appropriate
GPU drivers installed. In particular, if you are a Linux user with an AMD GPU
installed, you may need to install the
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

Installation requires roughly 18G of free disk space to load the libraries and
recommended model weights files.

## Walk through

Though there are multiple steps, there really is only one click involved to kick
off the process.

1. The source installer is distributed in ZIP files. Go to the
   [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
   look for a series of files named:

   - invokeAI-src-installer-mac.zip
   - invokeAI-src-installer-windows.zip
   - invokeAI-src-installer-linux.zip

   Download the one that is appropriate for your operating system.

2. Unpack the zip file into a directory that has at least 18G of free space. Do
   _not_ unpack into a directory that has an earlier version of InvokeAI.

   This will create a new directory named "InvokeAI". This example shows how
   this would look using the `unzip` command-line tool, but you may use any
   graphical or command-line Zip extractor:

   ```cmd
   C:\Documents\Linco> unzip invokeAI-windows.zip
   Archive: C:\Linco\Downloads\invokeAI-linux.zip
     creating: invokeAI\
    inflating: invokeAI\install.bat
    inflating: invokeAI\readme.txt
   ```

3. If you are using a desktop GUI, double-click the installer file. It will be
   named `install.bat` on Windows systems and `install.sh` on Linux and
   Macintosh systems.

4. Alternatively, from the command line, run the shell script or .bat file:

   ```cmd
   C:\Documents\Linco> cd invokeAI
   C:\Documents\Linco\invokeAI> install.bat
   ```

5. Sit back and let the install script work. It will install various binary
   requirements including Conda, Git and Python, then download the current
   InvokeAI code and install it along with its dependencies.

6. After installation completes, the installer will launch a script called
   `preload_models.py`, which will guide you through the first-time process of
   selecting one or more Stable Diffusion model weights files, downloading and
   configuring them.

   Note that the main Stable Diffusion weights file is protected by a license
   agreement that you must agree to in order to use. The script will list the
   steps you need to take to create an account on the official site that hosts
   the weights files, accept the agreement, and provide an access token that
   allows InvokeAI to legally download and install the weights files.

   If you have already downloaded the weights file(s) for another Stable
   Diffusion distribution, you may skip this step (by selecting "skip" when
   prompted) and configure InvokeAI to use the previously-downloaded files. The
   process for this is described in [Installing Models](INSTALLING_MODELS.md).

7. The script will now exit and you'll be ready to generate some images. The
   invokeAI directory will contain numerous files. Look for a shell script
   named `invoke.sh` (Linux/Mac) or `invoke.bat` (Windows). Launch the script
   by double-clicking it or typing its name at the command-line:

   ```cmd
   C:\Documents\Linco> cd invokeAI
   C:\Documents\Linco\invokeAI> invoke.bat
   ```

The `invoke.bat` (`invoke.sh`) script will give you the choice of starting (1)
the command-line interface, or (2) the web GUI. If you start the latter, you can
load the user interface by pointing your browser at http://localhost:9090.

The `invoke` script also offers you a third option labeled "open the developer
console". If you choose this option, you will be dropped into a command-line
interface in which you can run python commands directly, access developer tools,
and launch InvokeAI with customized options. To do the latter, you would launch
the script `scripts/invoke.py` as shown in this example:

```cmd
python scripts/invoke.py --web --max_load_models=3 \
    --model=waifu-1.3 --steps=30 --outdir=C:/Documents/AIPhotos
```

These options are described in detail in the
[Command-Line Interface](../features/CLI.md) documentation.

## Updating to newer versions

This section describes how to update InvokeAI to new versions of the software.

### Updating the stable version

This distribution is changing rapidly, and we add new features on a daily basis.
To update to the latest released version (recommended), run the `update.sh`
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
release and re-run the `preload_models` script to download any updated models
files that may be needed. You can also use this to add additional models that
you did not select at installation time.

### Updating to the development version

There may be times that there is a feature in the `development` branch of
InvokeAI that you'd like to take advantage of. Or perhaps there is a branch that
corrects an annoying bug. To do this, you will use the developer's console.

From within the invokeAI directory, run the command `invoke.sh` (Linux/Mac) or
`invoke.bat` (Windows) and select option (3) to open the developer's console.
Then run the following command to get the `development` branch:

```bash
git checkout development
git pull
conda env update
```

You can now close the developer console and run `invoke` as before. If you get
complaints about missing models, then you may need to do the additional step of
running `preload_models.py`. This happens relatively infrequently. To do this,
simply open up the developer's console again and type
`python scripts/preload_models.py`.

## Troubleshooting

If you run into problems during or after installation, the InvokeAI team is
available to help you. Either create an
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
make a request for help on the "bugs-and-support" channel of our
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
organization, but typically somebody will be available to help you within 24
hours, and often much sooner.
docs/installation/index.md (new file, +62 lines)

---
title: Overview
---

We offer several ways to install InvokeAI, each one suited to your
experience and preferences.

1. [InvokeAI installer](INSTALL_INVOKE.md)

   This is an installer script that installs InvokeAI and all the
   third party libraries it depends on. When a new version of
   InvokeAI is released, you will download and reinstall the new
   version.

   This installer is designed for people who want the system to "just
   work", don't have an interest in tinkering with it, and do not
   care about upgrading to unreleased experimental features.

   **Important Caveats**

   - This script does not support AMD GPUs. For Linux AMD support,
     please use the manual or source code installer methods.
   - This script has difficulty on some Macintosh machines
     that have previously been used for Python development due to
     conflicting development tools versions. Mac developers may wish
     to try the source code installer or one of the manual methods instead.

2. [Source code installer](INSTALL_SOURCE.md)

   This is a script that will install InvokeAI and all its essential
   third party libraries. In contrast to the previous installer, it
   includes access to a "developer console" which will allow you to
   access experimental features on the development branch.

   This method is recommended for individuals who wish to stay
   on the cutting edge of InvokeAI development and are not afraid
   of occasional breakage.

3. [Manual Installation](INSTALL_MANUAL.md)

   In this method you will manually run the commands needed to install
   InvokeAI and its dependencies. We offer two recipes: one suited to
   those who prefer the `conda` tool, and one suited to those who prefer
   `pip` and Python virtual environments.

   This method is recommended for users who have used `conda` or `pip`
   in the past, developers, and anyone who wishes to remain on
   the cutting edge of future InvokeAI development and is willing to put
   up with occasional glitches and breakage.

4. [Docker Installation](INSTALL_DOCKER.md)

   We also offer a method for creating Docker containers containing
   InvokeAI and its dependencies. This method is recommended for
   individuals who have experience with Docker containers and understand
   the pluses and minuses of a container-based install.

5. [Jupyter Notebooks Installation](INSTALL_JUPYTER.md)

   This method is suitable for running InvokeAI on a Google Colab
   account. It is recommended for individuals who have previously
   worked with Colab and are comfortable with the Jupyter notebook
   environment.
@@ -42,14 +42,25 @@ title: Manual Installation, Linux
    ```

 5. Use anaconda to copy necessary python packages, create a new python
-   environment named `invokeai` and activate the environment.
+   environment named `invokeai` and then activate the environment.

-   ```bash
-   (base) rm -rf src # (this is a precaution in case there is already a src directory)
-   (base) ~/InvokeAI$ conda env create
-   (base) ~/InvokeAI$ conda activate invokeai
-   (invokeai) ~/InvokeAI$
-   ```
+   !!! todo "For systems with a CUDA (Nvidia) card:"
+
+       ```bash
+       (base) rm -rf src # (this is a precaution in case there is already a src directory)
+       (base) ~/InvokeAI$ conda env create -f environment-cuda.yml
+       (base) ~/InvokeAI$ conda activate invokeai
+       (invokeai) ~/InvokeAI$
+       ```
+
+   !!! todo "For systems with an AMD card (using ROCm driver):"
+
+       ```bash
+       (base) rm -rf src # (this is a precaution in case there is already a src directory)
+       (base) ~/InvokeAI$ conda env create -f environment-AMD.yml
+       (base) ~/InvokeAI$ conda activate invokeai
+       (invokeai) ~/InvokeAI$
+       ```

 After these steps, your command prompt will be prefixed by `(invokeai)` as
 shown above.
@ -13,22 +13,9 @@ one of the steps, so instead of cloning this repo, simply download the notebook
from the link above and load it up in VSCode (with the appropriate extensions
installed)/Jupyter/JupyterLab and start running the cells one-by-one.

Note that you will need NVIDIA drivers, Python 3.10, and Git installed
beforehand - simplified
[step-by-step instructions](https://github.com/invoke-ai/InvokeAI/wiki/Easy-peasy-Windows-install)
are available in the wiki (you'll only need steps 1, 2, & 3 ).
Note that you will need NVIDIA drivers, Python 3.10, and Git installed beforehand.

## **Manual Install**

### **pip**

See
[Easy-peasy Windows install](https://github.com/invoke-ai/InvokeAI/wiki/Easy-peasy-Windows-install)
in the wiki

---

### **Conda**
## **Manual Install with Conda**

1. Install Anaconda3 (miniconda3 version) from [here](https://docs.anaconda.com/anaconda/install/windows/)

@ -52,23 +39,29 @@ in the wiki
cd InvokeAI
```

6. Run the following two commands:
6. Run the following commands:

```batch title="step 6a"
conda env create
```
!!! todo "For systems with a CUDA (Nvidia) card:"

```batch title="step 6b"
conda activate invokeai
```
```bash
rmdir src # (this is a precaution in case there is already a src directory)
conda env create -f environment-cuda.yml
conda activate invokeai
(invokeai)>
```

!!! todo "For systems with an AMD card (using ROCm driver):"

```bash
rmdir src # (this is a precaution in case there is already a src directory)
conda env create -f environment-AMD.yml
conda activate invokeai
(invokeai)>
```

This will install all python requirements and activate the "invokeai" environment
which sets PATH and other environment variables properly.

Note that the long form of the first command is `conda env create -f environment.yml`. If the
environment file isn't specified, conda will default to `environment.yml`. You will need
to provide the `-f` option if you wish to load a different environment file at any point.
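To make the `-f` behavior concrete, here is a minimal sketch (assuming you are in the repo root, where `environment.yml` lives):

```bash
# with no -f, conda reads ./environment.yml
conda env create

# the explicit long form of the same command
conda env create -f environment.yml

# any other environment file must be named with -f
conda env create -f environments-and-requirements/environment-lin-amd.yml
```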
7. Load the big stable diffusion weights files and a couple of smaller machine-learning models:

```bash
@ -4,40 +4,41 @@ channels:
- conda-forge
- defaults
dependencies:
- python=3.10
- pip>=22.2.2
- albumentations=0.4.3
- cudatoolkit
- pytorch
- torchvision
- numpy=1.23
- imageio=2.21
- opencv=4.6
- pillow=8.*
- einops=0.3.0
- eventlet
- flask-socketio=5.3.0
- flask=2.1.*
- flask_cors=3.0.10
- flask-socketio=5.3.0
- send2trash=1.8.0
- eventlet
- albumentations=0.4.3
- pudb=2019.2
- imageio-ffmpeg=0.4.2
- pytorch-lightning=1.7.7
- streamlit
- einops=0.3.0
- imageio=2.9.0
- kornia=0.6
- torchmetrics=0.7.0
- transformers=4.23
- torch-fidelity=0.3.0
- numpy=1.19
- opencv=4.6.0
- pillow=8.*
- pip>=22.2.2
- pudb=2019.2
- python=3.9.*
- pytorch
- pytorch-lightning=1.7.7
- send2trash=1.8.0
- streamlit
- tokenizers>=0.11.1,!=0.11.3,<0.13
- torch-fidelity=0.3.0
- torchmetrics=0.7.0
- torchvision
- transformers=4.21.3
- pip:
- dependency_injector==4.40.0
- getpass_asterisk
- omegaconf==2.1.1
- pyreadline3
- realesrgan
- taming-transformers-rom1504
- test-tube>=0.7.5
- git+https://github.com/openai/CLIP.git@main#egg=clip
- git+https://github.com/invoke-ai/k-diffusion.git@mps#egg=k_diffusion
- git+https://github.com/invoke-ai/Real-ESRGAN.git#egg=realesrgan
- git+https://github.com/invoke-ai/GFPGAN.git#egg=gfpgan
- git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN#egg=gfpgan
- -e .
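Since this file is normally reached through the `environment.yml` link, an already-created environment can be brought in line with the new dependency list without recreating it - a minimal sketch, not part of the diff:

```bash
# update the existing invokeai env in place, dropping packages that were removed
conda env update -f environment.yml --prune
conda activate invokeai
```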
45
environments-and-requirements/environment-lin-amd.yml
Normal file
@ -0,0 +1,45 @@
name: invokeai
channels:
- pytorch
- conda-forge
- defaults
dependencies:
- python>=3.9
- pip=22.2.2
- numpy=1.23.3
- pip:
- --extra-index-url https://download.pytorch.org/whl/rocm5.2/
- albumentations==0.4.3
- dependency_injector==4.40.0
- diffusers==0.6.0
- einops==0.3.0
- eventlet
- flask==2.1.3
- flask_cors==3.0.10
- flask_socketio==5.3.0
- getpass_asterisk
- imageio-ffmpeg==0.4.2
- imageio==2.9.0
- kornia==0.6.0
- omegaconf==2.2.3
- opencv-python==4.5.5.64
- pillow==9.2.0
- pudb==2019.2
- pyreadline3
- pytorch-lightning==1.7.7
- realesrgan
- send2trash==1.8.0
- streamlit==1.12.0
- taming-transformers-rom1504
- test-tube>=0.7.5
- torch
- torch-fidelity==0.3.0
- torchaudio
- torchmetrics==0.7.0
- torchvision
- transformers==4.21.3
- git+https://github.com/openai/CLIP.git@main#egg=clip
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN#egg=gfpgan
- -e .
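One way to sanity-check the ROCm wheel pulled in by the `--extra-index-url` line above - a hedged sketch; ROCm builds of PyTorch report their device through the `torch.cuda` namespace:

```bash
conda activate invokeai
python -c 'import torch; print(torch.__version__, torch.cuda.is_available())'
```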
@ -13,32 +13,33 @@ dependencies:
- cudatoolkit=11.6
- pip:
- albumentations==0.4.3
- opencv-python==4.5.5.64
- pudb==2019.2
- imageio==2.9.0
- imageio-ffmpeg==0.4.2
- pytorch-lightning==1.7.7
- omegaconf==2.2.3
- test-tube>=0.7.5
- streamlit==1.12.0
- send2trash==1.8.0
- pillow==9.2.0
- einops==0.3.0
- pyreadline3
- torch-fidelity==0.3.0
- transformers==4.21.3
- dependency_injector==4.40.0
- diffusers==0.6.0
- torchmetrics==0.7.0
- flask==2.1.3
- flask_socketio==5.3.0
- flask_cors==3.0.10
- einops==0.3.0
- eventlet
- flask==2.1.3
- flask_cors==3.0.10
- flask_socketio==5.3.0
- getpass_asterisk
- imageio-ffmpeg==0.4.2
- imageio==2.9.0
- kornia==0.6.0
- omegaconf==2.2.3
- opencv-python==4.5.5.64
- pillow==9.2.0
- pudb==2019.2
- pyreadline3
- pytorch-lightning==1.7.7
- realesrgan
- send2trash==1.8.0
- streamlit==1.12.0
- taming-transformers-rom1504
- test-tube>=0.7.5
- torch-fidelity==0.3.0
- torchmetrics==0.7.0
- transformers==4.21.3
- git+https://github.com/openai/CLIP.git@main#egg=clip
- git+https://github.com/invoke-ai/k-diffusion.git@mps#egg=k_diffusion
- git+https://github.com/invoke-ai/Real-ESRGAN.git#egg=realesrgan
- git+https://github.com/invoke-ai/GFPGAN.git#egg=gfpgan
- git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN#egg=gfpgan
- -e .
@ -58,7 +58,7 @@ dependencies:
- git+https://github.com/invoke-ai/k-diffusion.git@mps#egg=k_diffusion
- git+https://github.com/invoke-ai/Real-ESRGAN.git#egg=realesrgan
- git+https://github.com/invoke-ai/GFPGAN.git#egg=gfpgan
- git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- -e .
variables:
PYTORCH_ENABLE_MPS_FALLBACK: 1
46
environments-and-requirements/environment-win-cuda.yml
Normal file
@ -0,0 +1,46 @@
name: invokeai
channels:
- pytorch
- conda-forge
- defaults
dependencies:
- python>=3.9
- pip=22.2.2
- numpy=1.23.3
- torchvision=0.13.1
- torchaudio=0.12.1
- pytorch=1.12.1
- cudatoolkit=11.6
- pip:
- albumentations==0.4.3
- basicsr==1.4.1
- dependency_injector==4.40.0
- diffusers==0.6.0
- einops==0.3.0
- eventlet
- flask==2.1.3
- flask_cors==3.0.10
- flask_socketio==5.3.0
- getpass_asterisk
- imageio-ffmpeg==0.4.2
- imageio==2.9.0
- kornia==0.6.0
- omegaconf==2.2.3
- opencv-python==4.5.5.64
- pillow==9.2.0
- pudb==2019.2
- pyreadline3
- pytorch-lightning==1.7.7
- realesrgan
- send2trash==1.8.0
- streamlit==1.12.0
- taming-transformers-rom1504
- test-tube>=0.7.5
- torch-fidelity==0.3.0
- torchmetrics==0.7.0
- transformers==4.21.3
- git+https://github.com/openai/CLIP.git@main#egg=clip
- git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
- git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
- git+https://github.com/invoke-ai/GFPGAN#egg=gfpgan
- -e .
@ -1,41 +1,36 @@
--prefer-binary

# pip will resolve the version which matches torch
albumentations
dependency_injector==4.40.0
diffusers
einops
eventlet
flask==2.1.3
flask_cors==3.0.10
flask_socketio==5.3.0
flaskwebgui==0.3.7
getpass_asterisk
huggingface-hub
imageio-ffmpeg
imageio
imageio-ffmpeg
kornia
# pip will resolve the version which matches torch
numpy
omegaconf
opencv-python
pillow
pip>=22
pudb
pytorch-lightning==1.7.7
scikit-image>=0.19
streamlit
pyreadline3
# "CompVis/taming-transformers" IS NOT INSTALLABLE
# This is a drop-in replacement
pytorch-lightning==1.7.7
realesrgan
scikit-image>=0.19
send2trash
streamlit
taming-transformers-rom1504
test-tube
torch-fidelity
torchmetrics
transformers==4.21.*
flask==2.1.3
flask_socketio==5.3.0
flask_cors==3.0.10
flaskwebgui==0.3.7
send2trash
dependency_injector==4.40.0
eventlet
realesrgan
diffusers
git+https://github.com/openai/CLIP.git@main#egg=clip
git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
git+https://github.com/invoke-ai/Real-ESRGAN.git#egg=realesrgan
git+https://github.com/invoke-ai/GFPGAN.git#egg=gfpgan
git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
git+https://github.com/invoke-ai/GFPGAN#egg=gfpgan
@ -1,4 +1,4 @@
-r requirements.txt
-r environments-and-requirements/requirements-base.txt

# Get hardware-appropriate torch/torchvision
--extra-index-url https://download.pytorch.org/whl/rocm5.1.1 --trusted-host https://download.pytorch.org
3
environments-and-requirements/requirements-lin-arm64.txt
Normal file
@ -0,0 +1,3 @@
--pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cpu
-r environments-and-requirements/requirements-base.txt
-e .
2
environments-and-requirements/requirements-lin-cuda.txt
Normal file
@ -0,0 +1,2 @@
-r environments-and-requirements/requirements-base.txt
-e .
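These thin per-platform files just layer hardware-specific pins on top of `requirements-base.txt`, which is why the installers link or copy one of them to the top-level `requirements.txt` before invoking pip - a minimal usage sketch:

```bash
# pick the file matching your platform, then install in one pass
ln -sf environments-and-requirements/requirements-lin-cuda.txt requirements.txt
pip install -r requirements.txt
```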
@ -1,4 +1,4 @@
-r requirements.txt
-r environments-and-requirements/requirements-base.txt

protobuf==3.19.6
torch<1.13.0
@ -1,7 +1,8 @@
-r requirements.txt
-r environments-and-requirements/requirements-base.txt

# Get hardware-appropriate torch/torchvision
--extra-index-url https://download.pytorch.org/whl/cu116 --trusted-host https://download.pytorch.org
basicsr==1.4.1
torch==1.12.1
torchvision==0.13.1
-e .
829
frontend/dist/assets/index.4488003f.js
vendored
690
frontend/dist/assets/index.ae92a637.js
vendored
517
frontend/dist/assets/index.cc049b93.js
vendored
517
frontend/dist/assets/index.e2832fd4.js
vendored
2
frontend/dist/index.html
vendored
@ -6,7 +6,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>InvokeAI - A Stable Diffusion Toolkit</title>
<link rel="shortcut icon" type="icon" href="./assets/favicon.0d253ced.ico" />
<script type="module" crossorigin src="./assets/index.1fc0290b.js"></script>
<script type="module" crossorigin src="./assets/index.a8ba2a6c.js"></script>
<link rel="stylesheet" href="./assets/index.40a72c80.css">
</head>

10651
frontend/package-lock.json
generated
@ -1,4 +1,4 @@
import { IconButton, Image } from '@chakra-ui/react';
import { IconButton, Image, Spinner } from '@chakra-ui/react';
import { useState } from 'react';
import { FaAngleLeft, FaAngleRight } from 'react-icons/fa';
import { RootState, useAppDispatch, useAppSelector } from '../../app/store';
@ -30,7 +30,6 @@ export const imagesSelector = createSelector(

return {
imageToDisplay: intermediateImage ? intermediateImage : currentImage,
isIntermediate: intermediateImage,
currentCategory,
isOnFirstImage: currentImageIndex === 0,
isOnLastImage:
@ -56,7 +55,6 @@ export default function CurrentImagePreview() {
isOnLastImage,
shouldShowImageDetails,
imageToDisplay,
isIntermediate,
} = useAppSelector(imagesSelector);

const [shouldShowNextPrevButtons, setShouldShowNextPrevButtons] =
@ -83,8 +81,8 @@ export default function CurrentImagePreview() {
{imageToDisplay && (
<Image
src={imageToDisplay.url}
width={isIntermediate ? imageToDisplay.width : undefined}
height={isIntermediate ? imageToDisplay.height : undefined}
width={imageToDisplay.width}
height={imageToDisplay.height}
/>
)}
{!shouldShowImageDetails && (

2968
frontend/yarn.lock
BIN
installer/WinLongPathsEnabled.reg
Normal file
@ -1,22 +1,29 @@
#!/bin/bash
#!/usr/bin/env bash

cd "$(dirname "${BASH_SOURCE[0]}")"
set -euo pipefail
IFS=$'\n\t'

echo "Be certain that you're in the 'installer' directory before continuing."
read -p "Press any key to continue, or CTRL-C to exit..."

# make the installer zip for linux and mac
rm -rf invokeAI
mkdir -p invokeAI
cp install.sh invokeAI
cp readme.txt invokeAI
rm -rf InvokeAI
mkdir -p InvokeAI
cp install.sh InvokeAI
cp readme.txt InvokeAI

zip -r invokeAI-linux.zip invokeAI
zip -r invokeAI-mac.zip invokeAI
zip -r InvokeAI-linux.zip InvokeAI
zip -r InvokeAI-mac.zip InvokeAI

# make the installer zip for windows
rm -rf invokeAI
mkdir -p invokeAI
cp install.bat invokeAI
cp readme.txt invokeAI
rm -rf InvokeAI
mkdir -p InvokeAI
cp install.bat InvokeAI
cp readme.txt InvokeAI
cp WinLongPathsEnabled.reg InvokeAI

zip -r invokeAI-windows.zip invokeAI
zip -r InvokeAI-windows.zip InvokeAI

echo "The installer zips are ready to be distributed.."
rm -rf InvokeAI

echo "The installer zips are ready for distribution."
@ -1,115 +1,164 @@
@echo off

@rem This script will install git and conda (if not found on the PATH variable)
@rem This script will install git (if not found on the PATH variable)
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
@rem For users who already have git and conda, this step will be skipped.
@rem For users who already have git, this step will be skipped.

@rem Next, it'll checkout the project's git repo, if necessary.
@rem Finally, it'll create the conda environment and preload the models.
@rem Next, it'll download the project's source code.
@rem Then it will download a self-contained, standalone Python and unpack it.
@rem Finally, it'll create the Python virtual environment and preload the models.

@rem This enables a user to install this project without manually installing conda and git.
@rem This enables a user to install this project without manually installing git or Python

echo "Installing InvokeAI.."
echo.
echo ***** Installing InvokeAI.. *****

@rem config
set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba
set PATH=c:\windows\system32

@rem Config
set INSTALL_ENV_DIR=%cd%\installer_files\env
@rem https://mamba.readthedocs.io/en/latest/installation.html
set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
set REPO_URL=https://github.com/invoke-ai/InvokeAI.git
set umamba_exists=F
@rem Change the download URL to an InvokeAI repo's release URL

@rem figure out whether git and conda needs to be installed
if exist "%INSTALL_ENV_DIR%" set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%
set RELEASE_URL=https://github.com/invoke-ai/InvokeAI
set RELEASE_SOURCEBALL=/archive/refs/heads/v2.1.3.tar.gz
set PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
set PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-x86_64-pc-windows-msvc-shared-install_only.tar.gz

set PACKAGES_TO_INSTALL=

call conda --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% conda

call git --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git

call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" EQU "0" set umamba_exists=T
@rem Cleanup
del /q .tmp1 .tmp2

@rem (if necessary) install git and conda into a contained environment
@rem (if necessary) install git into a contained environment
if "%PACKAGES_TO_INSTALL%" NEQ "" (
@rem download micromamba
if "%umamba_exists%" == "F" (
echo "Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to %MAMBA_ROOT_PREFIX%\micromamba.exe"
echo ***** Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to micromamba.exe *****

mkdir "%MAMBA_ROOT_PREFIX%"
call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe"
call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > micromamba.exe

@rem test the mamba binary
echo Micromamba version:
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version
)
@rem test the mamba binary
echo ***** Micromamba version: *****
call micromamba.exe --version

@rem create the installer env
if not exist "%INSTALL_ENV_DIR%" (
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" create -y --prefix "%INSTALL_ENV_DIR%"
call micromamba.exe create -y --prefix "%INSTALL_ENV_DIR%"
)

echo "Packages to install:%PACKAGES_TO_INSTALL%"
echo ***** Packages to install:%PACKAGES_TO_INSTALL% *****

call "%MAMBA_ROOT_PREFIX%\micromamba.exe" install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge %PACKAGES_TO_INSTALL%
call micromamba.exe install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge %PACKAGES_TO_INSTALL%

if not exist "%INSTALL_ENV_DIR%" (
echo "There was a problem while installing%PACKAGES_TO_INSTALL% using micromamba. Cannot continue."
echo ----- There was a problem while installing "%PACKAGES_TO_INSTALL%" using micromamba. Cannot continue. -----
pause
exit /b
)
)

set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%
del /q micromamba.exe

@rem get the repo (and load into the current directory)
if not exist ".git" (
call git config --global init.defaultBranch main
call git init
call git remote add origin %REPO_URL%
call git fetch
# call git checkout origin/main -ft
call git checkout origin/release-candidate-2-1 -ft
)
@rem For 'git' only
set PATH=%INSTALL_ENV_DIR%\Library\bin;%PATH%

@rem activate the base env
call conda activate
@rem Download/unpack/clean up InvokeAI release sourceball
set err_msg=----- InvokeAI source download failed -----
curl -L %RELEASE_URL%/%RELEASE_SOURCEBALL% --output InvokeAI.tgz
if %errorlevel% neq 0 goto err_exit

@rem create the environment
call conda env remove -n invokeai
call conda env create
if "%ERRORLEVEL%" NEQ "0" (
echo ""
echo "Something went wrong while installing Python libraries and cannot continue.
echo "Please visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
echo "installation methods."
echo "Press any key to continue"
pause
exit /b
)
set err_msg=----- InvokeAI source unpack failed -----
tar -zxf InvokeAI.tgz
if %errorlevel% neq 0 goto err_exit

del /q InvokeAI.tgz

set err_msg=----- InvokeAI source copy failed -----
cd InvokeAI-*
xcopy . .. /e /h
if %errorlevel% neq 0 goto err_exit
cd ..

@rem cleanup
for /f %%i in ('dir /b InvokeAI-*') do rd /s /q %%i
rd /s /q .dev_scripts .github docker-build tests
del /q requirements.in requirements-mkdocs.txt shell.nix

echo ***** Unpacked InvokeAI source *****

@rem Download/unpack/clean up python-build-standalone
set err_msg=----- Python download failed -----
curl -L %PYTHON_BUILD_STANDALONE_URL%/%PYTHON_BUILD_STANDALONE% --output python.tgz
if %errorlevel% neq 0 goto err_exit

set err_msg=----- Python unpack failed -----
tar -zxf python.tgz
if %errorlevel% neq 0 goto err_exit

del /q python.tgz

echo ***** Unpacked python-build-standalone *****

@rem create venv
set err_msg=----- problem creating venv -----
.\python\python -E -s -m venv .venv
@rem In reality, the following is ALL that 'activate.bat' does,
@rem aside from setting the prompt, which we don't care about
set PYTHONPATH=
set PATH=.venv\Scripts;%PATH%
if %errorlevel% neq 0 goto err_exit

echo ***** Created Python virtual environment *****

@rem Print venv's Python version
set err_msg=----- problem calling venv's python -----
echo We're running under
.venv\Scripts\python --version
if %errorlevel% neq 0 goto err_exit

set err_msg=----- pip update failed -----
.venv\Scripts\python -m pip install --no-cache-dir --no-warn-script-location --upgrade pip
if %errorlevel% neq 0 goto err_exit

echo ***** Updated pip *****

set err_msg=----- requirements file copy failed -----
copy installer\py3.10-windows-x86_64-cuda-reqs.txt requirements.txt
if %errorlevel% neq 0 goto err_exit

set err_msg=----- main pip install failed -----
.venv\Scripts\python -m pip install --no-cache-dir --no-warn-script-location -r requirements.txt
if %errorlevel% neq 0 goto err_exit

set err_msg=----- clipseg install failed -----
.venv\Scripts\python -m pip install --no-cache-dir --no-warn-script-location git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
if %errorlevel% neq 0 goto err_exit

set err_msg=----- InvokeAI setup failed -----
.venv\Scripts\python -m pip install --no-cache-dir --no-warn-script-location -e .
if %errorlevel% neq 0 goto err_exit

echo ***** Installed Python dependencies *****

call conda activate invokeai
@rem preload the models
call python scripts\preload_models.py
if "%ERRORLEVEL%" NEQ "0" (
echo ""
echo "The preload_models.py script crashed or was cancelled."
echo "InvokeAI is not ready to run. To run preload_models.py again,"
echo "run the command 'update.bat' in this directory."
echo "Press any key to continue"
pause
exit /b
)
call .venv\Scripts\python scripts\preload_models.py
set err_msg=----- model download clone failed -----
if %errorlevel% neq 0 goto err_exit

echo ***** Finished downloading models *****

echo ***** Installing invoke.bat ******
copy installer\invoke.bat .\invoke.bat
echo All done! Execute the file invoke.bat in this directory to start InvokeAI

@rem more cleanup
rd /s /q installer installer_files

@rem tell the user their next steps
echo ""
echo "* InvokeAI installed successfully *"
echo "You can now start generating images by double-clicking the 'invoke.bat' file (inside this folder)
echo "Press any key to continue"
pause
exit 0
exit

:err_exit
echo %err_msg%
pause
exit
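Stripped of the batch syntax and error plumbing, the install flow above reduces to a few steps - a condensed POSIX sketch, assuming the same `RELEASE_*` and `PYTHON_BUILD_STANDALONE*` variables are already set:

```bash
# download and unpack the source and a standalone Python
curl -L "$RELEASE_URL/$RELEASE_SOURCEBALL" -o InvokeAI.tgz && tar -zxf InvokeAI.tgz
curl -L "$PYTHON_BUILD_STANDALONE_URL/$PYTHON_BUILD_STANDALONE" -o python.tgz && tar -zxf python.tgz

# create a venv from the standalone interpreter and install into it
./python/bin/python3 -E -s -m venv .venv
.venv/bin/python3 -m pip install -r requirements.txt
.venv/bin/python3 scripts/preload_models.py
```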
@ -1,130 +1,211 @@
#!/bin/bash
#!/usr/bin/env bash

# This script will install git and conda (if not found on the PATH variable)
set -euo pipefail
IFS=$'\n\t'

function _err_exit {
if test "$1" -ne 0
then
echo -e "Error code $1; Error caught was '$2'"
read -p "Press any key to exit..."
exit
fi
}

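A usage sketch for the helper above. Two subtleties worth noting: callers later in this script pass the literal name `_err_msg` as `$2` (quoting it as `"$_err_msg"` would expand the message itself), and under `set -euo pipefail` a failing command exits before the check even runs:

```bash
# usage sketch, assuming set -e is not in effect at this point
curl -fLo InvokeAI.tgz "$SOME_URL"    # $SOME_URL is a placeholder, not from the script
_err_exit $? "----- example download failed -----"
```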
# This script will install git (if not found on the PATH variable)
# using micromamba (an 8mb static-linked single-file binary, conda replacement).
# For users who already have git and conda, this step will be skipped.
# For users who already have git, this step will be skipped.

# Next, it'll checkout the project's git repo, if necessary.
# Finally, it'll create the conda environment and preload the models.
# Next, it'll download the project's source code.
# Then it will download a self-contained, standalone Python and unpack it.
# Finally, it'll create the Python virtual environment and preload the models.

# This enables a user to install this project without manually installing conda and git.
# This enables a user to install this project without manually installing git or Python

cd "$(dirname "${BASH_SOURCE[0]}")"
echo -e "\n***** Installing InvokeAI... *****\n"

echo "Installing InvokeAI.."
echo ""

OS_NAME=$(uname -s)
case "${OS_NAME}" in
Linux*) OS_NAME="linux";;
Darwin*) OS_NAME="mac";;
*) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
Darwin*) OS_NAME="darwin";;
*) echo -e "\n----- Unknown OS: $OS_NAME! This script runs only on Linux or MacOS -----\n" && exit
esac

OS_ARCH=$(uname -m)
case "${OS_ARCH}" in
x86_64*) OS_ARCH="64";;
arm64*) OS_ARCH="arm64";;
*) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit
x86_64*) ;;
arm64*) ;;
*) echo -e "\n----- Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64 -----\n" && exit
esac

# https://mamba.readthedocs.io/en/latest/installation.html
if [ "$OS_NAME" == "linux" ] && [ "$OS_ARCH" == "arm64" ]; then OS_ARCH="aarch64"; fi
MAMBA_OS_NAME=$OS_NAME
MAMBA_ARCH=$OS_ARCH
if [ "$OS_NAME" == "darwin" ]; then
MAMBA_OS_NAME="osx"
fi

if [ "$OS_ARCH" == "linux" ]; then
MAMBA_ARCH="aarch64"
fi

if [ "$OS_ARCH" == "x86_64" ]; then
MAMBA_ARCH="64"
fi

PY_ARCH=$OS_ARCH
if [ "$OS_ARCH" == "arm64" ]; then
PY_ARCH="aarch64"
fi

# Compute device ('cd' segment of reqs files) detect goes here
# This needs a ton of work
# Suggestions:
# - lspci
# - check $PATH for nvidia-smi, get CUDA/GPU version from output
# - Surely there's a similar utility for AMD?
CD="cuda"
if [ "$OS_NAME" == "darwin" ] && [ "$OS_ARCH" == "arm64" ]; then
CD="mps"
fi
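One possible shape for the detection the TODO above asks for - a hedged sketch only; the chosen value has to match an existing `installer/py3.10-<os>-<arch>-<cd>-reqs.txt` file:

```bash
CD="cpu"
if command -v nvidia-smi >/dev/null 2>&1; then
  CD="cuda"
elif lspci 2>/dev/null | grep -qi 'vga.*amd'; then
  CD="rocm"   # hypothetical segment; no rocm reqs file appears in this diff
fi
```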
# config
export MAMBA_ROOT_PREFIX="$(pwd)/installer_files/mamba"
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${OS_NAME}-${OS_ARCH}/latest"
REPO_URL="https://github.com/invoke-ai/InvokeAI.git"
umamba_exists="F"

# figure out whether git and conda needs to be installed
if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${MAMBA_OS_NAME}-${MAMBA_ARCH}/latest"
RELEASE_URL=https://github.com/invoke-ai/InvokeAI
RELEASE_SOURCEBALL=/archive/refs/heads/v2.1.3.tar.gz
PYTHON_BUILD_STANDALONE_URL=https://github.com/indygreg/python-build-standalone/releases/download
if [ "$OS_NAME" == "darwin" ]; then
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-apple-darwin-install_only.tar.gz
elif [ "$OS_NAME" == "linux" ]; then
PYTHON_BUILD_STANDALONE=20221002/cpython-3.10.7+20221002-${PY_ARCH}-unknown-linux-gnu-install_only.tar.gz
fi

PACKAGES_TO_INSTALL=""

if ! hash "conda" &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL conda"; fi
if ! hash "git" &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi

if "$MAMBA_ROOT_PREFIX/micromamba" --version &>/dev/null; then umamba_exists="T"; fi

# (if necessary) install git and conda into a contained environment
if [ "$PACKAGES_TO_INSTALL" != "" ]; then
# download micromamba
if [ "$umamba_exists" == "F" ]; then
echo "Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to $MAMBA_ROOT_PREFIX/micromamba"
echo -e "\n***** Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to micromamba *****\n"

mkdir -p "$MAMBA_ROOT_PREFIX"
curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvj bin/micromamba -O > "$MAMBA_ROOT_PREFIX/micromamba"
curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvj bin/micromamba -O > micromamba

chmod u+x "$MAMBA_ROOT_PREFIX/micromamba"
chmod u+x "micromamba"

# test the mamba binary
echo "Micromamba version:"
"$MAMBA_ROOT_PREFIX/micromamba" --version
fi
# test the mamba binary
echo -e "\n***** Micromamba version: *****\n"
"micromamba" --version

# create the installer env
if [ ! -e "$INSTALL_ENV_DIR" ]; then
"$MAMBA_ROOT_PREFIX/micromamba" create -y --prefix "$INSTALL_ENV_DIR"
"micromamba" create -y --prefix "$INSTALL_ENV_DIR"
fi

echo "Packages to install:$PACKAGES_TO_INSTALL"
echo -e "\n***** Packages to install:$PACKAGES_TO_INSTALL *****\n"

"$MAMBA_ROOT_PREFIX/micromamba" install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge $PACKAGES_TO_INSTALL
"micromamba" install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge $PACKAGES_TO_INSTALL

if [ ! -e "$INSTALL_ENV_DIR" ]; then
echo "There was a problem while initializing micromamba. Cannot continue."
echo -e "\n----- There was a problem while initializing micromamba. Cannot continue. -----\n"
exit
fi
fi

if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi
rm -f micromamba.exe

# get the repo (and load into the current directory)
if [ ! -e ".git" ]; then
git config --global init.defaultBranch main
git init
git remote add origin "$REPO_URL"
git fetch
git checkout origin/release-candidate-2-1 -ft
fi
export PATH="$INSTALL_ENV_DIR/bin:$PATH"

# create the environment
CONDA_BASEPATH=$(conda info --base)
source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)
# Download/unpack/clean up InvokeAI release sourceball
_err_msg="\n----- InvokeAI source download failed -----\n"
curl -L $RELEASE_URL/$RELEASE_SOURCEBALL --output InvokeAI.tgz
_err_exit $? _err_msg
_err_msg="\n----- InvokeAI source unpack failed -----\n"
tar -zxf InvokeAI.tgz
_err_exit $? _err_msg

conda activate
rm -f InvokeAI.tgz

if [ "$OS_NAME" == "mac" ]; then
PIP_EXISTS_ACTION=w CONDA_SUBDIR=osx-${OS_ARCH} conda env create -f environment-mac.yml
else
conda env remove -n invokeai
conda env create -f environment.yml
fi
_err_msg="\n----- InvokeAI source copy failed -----\n"
cd InvokeAI-*
cp -r . ..
_err_exit $? _err_msg
cd ..

status=$?
# cleanup
rm -rf InvokeAI-*/
rm -rf .dev_scripts/ .github/ docker-build/ tests/ requirements.in requirements-mkdocs.txt shell.nix

if test $status -ne 0
then
echo "Something went wrong while installing Python libraries and cannot continue."
echo "Please visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
echo "installation methods"
else
conda activate invokeai
# preload the models
echo "Calling the preload_models.py script"
python scripts/preload_models.py
status=$?
if test $status -ne 0
then
echo "The preload_models.py script crashed or was cancelled."
echo "InvokeAI is not ready to run. Try again by running"
echo "update.sh in this directory."
else
# tell the user their next steps
echo "You can now start generating images by running invoke.sh (inside this folder), using ./invoke.sh"
fi
fi
echo -e "\n***** Unpacked InvokeAI source *****\n"

conda activate invokeai
# Download/unpack/clean up python-build-standalone
_err_msg="\n----- Python download failed -----\n"
curl -L $PYTHON_BUILD_STANDALONE_URL/$PYTHON_BUILD_STANDALONE --output python.tgz
_err_exit $? _err_msg
_err_msg="\n----- Python unpack failed -----\n"
tar -zxf python.tgz
_err_exit $? _err_msg

rm -f python.tgz

echo -e "\n***** Unpacked python-build-standalone *****\n"

# create venv
_err_msg="\n----- problem creating venv -----\n"
./python/bin/python3 -E -s -m venv .venv
_err_exit $? _err_msg
# In reality, the following is ALL that 'activate.bat' does,
# aside from setting the prompt, which we don't care about
export PYTHONPATH=
export PATH=.venv/bin:$PATH

echo -e "\n***** Created Python virtual environment *****\n"

# Print venv's Python version
_err_msg="\n----- problem calling venv's python -----\n"
echo -e "We're running under"
.venv/bin/python3 --version
_err_exit $? _err_msg

_err_msg="\n----- pip update failed -----\n"
.venv/bin/python3 -m pip install --no-cache-dir --no-warn-script-location --upgrade pip
_err_exit $? _err_msg

echo -e "\n***** Updated pip *****\n"

_err_msg="\n----- requirements file copy failed -----\n"
cp installer/py3.10-${OS_NAME}-"${OS_ARCH}"-${CD}-reqs.txt requirements.txt
_err_exit $? _err_msg

_err_msg="\n----- main pip install failed -----\n"
.venv/bin/python3 -m pip install --no-cache-dir --no-warn-script-location -r requirements.txt
_err_exit $? _err_msg

_err_msg="\n----- clipseg install failed -----\n"
.venv/bin/python3 -m pip install --no-cache-dir --no-warn-script-location git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
_err_exit $? _err_msg

_err_msg="\n----- InvokeAI setup failed -----\n"
.venv/bin/python3 -m pip install --no-cache-dir --no-warn-script-location -e .
_err_exit $? _err_msg

echo -e "\n***** Installed Python dependencies *****\n"

# preload the models
.venv/bin/python3 scripts/preload_models.py
_err_msg="\n----- model download clone failed -----\n"
_err_exit $? _err_msg

echo -e "\n***** Finished downloading models *****\n"

echo -e "\n***** Installing invoke.sh ******\n"
cp installer/invoke.sh .

# more cleanup
rm -rf installer/ installer_files/

echo "All done! Run the command './invoke.sh' to start InvokeAI."
read -p "Press any key to exit..."
exit
27
installer/invoke.bat
Normal file
@ -0,0 +1,27 @@
@echo off

set PATH=c:\windows\system32
set PATH=.venv\Scripts;%PATH%

echo Do you want to generate images using the
echo 1. command-line
echo 2. browser-based UI
echo 3. open the developer console
set /P restore="Please enter 1, 2 or 3: "
IF /I "%restore%" == "1" (
echo Starting the InvokeAI command-line..
.venv\Scripts\python scripts\invoke.py
) ELSE IF /I "%restore%" == "2" (
echo Starting the InvokeAI browser-based UI..
.venv\Scripts\python scripts\invoke.py --web
) ELSE IF /I "%restore%" == "3" (
echo Developer Console
call where python
call python --version

cmd /k
) ELSE (
echo Invalid selection
pause
exit /b
)
24
installer/invoke.sh
Executable file
@ -0,0 +1,24 @@
#!/usr/bin/env bash

set -euo pipefail
IFS=$'\n\t'

PATH=.venv/scripts:$PATH

if [ "$0" != "bash" ]; then
echo "Do you want to generate images using the"
echo "1. command-line"
echo "2. browser-based UI"
echo "3. open the developer console"
read -p "Please enter 1, 2, or 3: " yn
case $yn in
1 ) printf "\nStarting the InvokeAI command-line..\n"; .venv/bin/python scripts/invoke.py;;
2 ) printf "\nStarting the InvokeAI browser-based UI..\n"; .venv/bin/python scripts/invoke.py --web;;
3 ) printf "\nDeveloper Console:\n"; file_name=$(basename "${BASH_SOURCE[0]}"); bash --init-file "$file_name";;
* ) echo "Invalid selection"; exit;;
esac
else # in developer console
python --version
echo "Press ^D to exit"
export PS1="(InvokeAI) \u@\h \w> "
fi
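The developer-console branch relies on re-invoking this same file as a bash init file, so the second pass sees `$0 == "bash"` and falls into the `else` arm - the pattern in isolation, as a hedged sketch:

```bash
#!/usr/bin/env bash
# First pass: $0 is the script path, so re-exec bash with this file as init file.
# Second pass: bash sources the init file with $0 set to "bash".
if [ "$0" != "bash" ]; then
  bash --init-file "$(basename "${BASH_SOURCE[0]}")"
else
  export PS1="(sketch) \u@\h \w> "
fi
```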
2069
installer/py3.10-darwin-arm64-mps-reqs.txt
Normal file
2069
installer/py3.10-darwin-x86_64-cpu-reqs.txt
Normal file
2066
installer/py3.10-linux-x86_64-cuda-reqs.txt
Normal file
2072
installer/py3.10-windows-x86_64-cuda-reqs.txt
Normal file
@ -3,9 +3,15 @@ InvokeAI
Project homepage: https://github.com/invoke-ai/InvokeAI

Installation on Windows:
Please double-click the 'install.bat' file (while keeping it inside the invokeAI folder).
NOTE: You might need to enable Windows Long Paths. If you're not sure,
then you almost certainly need to. Simply double-click the 'WinLongPathsEnabled.reg'
file. Note that you will need to have admin privileges in order to
do this.

Please double-click the 'install.bat' file (while keeping it inside the invokeAI folder).

Installation on Linux and Mac:
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).

After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh' file (on Linux/Mac) to start InvokeAI.
After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh'
file (on Linux/Mac) to start InvokeAI.
24
installer/requirements.in
Normal file
@ -0,0 +1,24 @@
--prefer-binary
--extra-index-url https://download.pytorch.org/whl/cu116
--trusted-host https://download.pytorch.org
albumentations
diffusers
eventlet
flask_cors
flask_socketio
flaskwebgui
getpass_asterisk
imageio-ffmpeg
pyreadline3
realesrgan
send2trash
streamlit
taming-transformers-rom1504
test-tube
torch-fidelity
torchvision==0.13.1 ; platform_system == 'Darwin'
torchvision==0.13.1+cu116 ; platform_system == 'Linux' or platform_system == 'Windows'
transformers
https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip
https://github.com/TencentARC/GFPGAN/archive/2eac2033893ca7f427f4035d80fe95b92649ac56.zip
https://github.com/invoke-ai/k-diffusion/archive/7f16b2c33411f26b3eae78d10648d625cb0c1095.zip
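The pinned `py3.10-*-reqs.txt` files listed above look like compiled outputs of this `.in` file. A plausible regeneration sketch, assuming pip-tools is the compiler (that tool choice is an assumption, not stated in the diff):

```bash
pip install pip-tools
pip-compile installer/requirements.in -o installer/py3.10-linux-x86_64-cuda-reqs.txt
```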
@ -567,9 +567,8 @@ class Generate:
seed = opt.seed or args.seed
if seed is None or seed < 0:
seed = random.randrange(0, np.iinfo(np.uint32).max)

prompt = opt.prompt or args.prompt or ''

print(f'>> using seed {seed} and prompt "{prompt}" for {image_path}')

# try to reuse the same filename prefix as the original file.

@ -47,7 +47,6 @@ def get_uc_and_c_and_ec(prompt_string_uncleaned, model, log_tokens=False, skip_n
parsed_prompt = pp.parse_conjunction(prompt_string_cleaned).prompts[0]

parsed_negative_prompt: FlattenedPrompt = pp.parse_conjunction(unconditioned_words).prompts[0]
print(f">> Parsed prompt to {parsed_prompt}")

conditioning = None
cac_args:CrossAttentionControl.Arguments = None
@ -169,7 +169,8 @@ class Inpaint(Img2Img):
# Fill missing areas of original image
init_filled = self.tile_fill_missing(
self.pil_image.copy(),
seed = self.seed if self.seed >= 0 else self.new_seed(),
seed = self.seed if (self.seed is not None
and self.seed >= 0) else self.new_seed(),
tile_size = tile_size
)
init_filled.paste(init_image, (0,0), init_image.split()[-1])

@ -72,6 +72,7 @@ class GFPGAN():
image = image.resize(res.size)
res = Image.blend(image, res, strength)

if torch.cuda.is_available():
torch.cuda.empty_cache()
self.gfpgan = None

@ -35,8 +35,8 @@ from PIL import Image, ImageOps
from torchvision import transforms

CLIP_VERSION = 'ViT-B/16'
CLIPSEG_WEIGHTS = 'src/clipseg/weights/rd64-uni.pth'
CLIPSEG_WEIGHTS_REFINED = 'src/clipseg/weights/rd64-uni-refined.pth'
CLIPSEG_WEIGHTS = 'models/clipseg/clipseg_weights/rd64-uni.pth'
CLIPSEG_WEIGHTS_REFINED = 'models/clipseg/clipseg_weights/rd64-uni-refined.pth'
CLIPSEG_SIZE = 352

class SegmentedGrayscale(object):
@ -1,3 +0,0 @@
[tool.blue]
line-length = 90
target-version = ['py310']
@ -1,27 +0,0 @@
albumentations==0.4.3
einops==0.3.0
diffusers==0.6.0
huggingface-hub==0.8.1
imageio==2.9.0
imageio-ffmpeg==0.4.2
kornia==0.6.0
numpy==1.23.1
--pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cpu
omegaconf==2.1.1
opencv-python==4.6.0.66
pillow==9.2.0
pudb==2019.2
torch==1.12.1
torchvision==0.13.0
pytorch-lightning==1.7.7
streamlit==1.12.0
test-tube>=0.7.5
torch-fidelity==0.3.0
torchmetrics==0.6.0
transformers==4.21.3
-e git+https://github.com/openai/CLIP.git@main#egg=clip
-e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
-e git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion
-e git+https://github.com/TencentARC/GFPGAN.git#egg=gfpgan
-e git+https://github.com/invoke-ai/clipseg.git@models-rename#egg=clipseg
-e .
@ -613,6 +613,7 @@ def do_textmask(gen, opt, callback):
image_path = os.path.join(opt.outdir,image_path)
assert os.path.exists(image_path), '** "{opt.prompt}" not found. Please enter the name of an existing image file to mask **'
assert opt.text_mask is not None and len(opt.text_mask) >= 1, '** Please provide a text mask with -tm **'
opt.input_file_path = image_path
tm = opt.text_mask[0]
threshold = float(opt.text_mask[1]) if len(opt.text_mask) > 1 else 0.5
gen.apply_textmask(
@ -633,6 +634,7 @@ def do_postprocess (gen, opt, callback):
file_path = os.path.join(opt.outdir,file_path)

opt.input_file_path = file_path

tool=None
if opt.facetool_strength > 0:
tool = opt.facetool
@ -104,13 +104,15 @@ def postscript():
print(
'''\n** Model Installation Successful **\nYou're all set! You may now launch InvokeAI using one of these two commands:
Web version:

python scripts/invoke.py --web (connect to http://localhost:9090)

Command-line version:

python scripts/invoke.py

Remember to activate that 'invokeai' environment before running invoke.py.

Or, if you used one of the automated installers, execute "invoke.sh" (Linux/Mac)
or "invoke.bat" (Windows) to start the script.

Have fun!
'''
)
@ -415,7 +417,7 @@ def download_kornia():

#---------------------------------------------
def download_clip():
print('Loading CLIP model...',end='')
print('Loading CLIP model (ignore deprecation errors)...',end='')
sys.stdout.flush()
version = 'openai/clip-vit-large-patch14'
tokenizer = CLIPTokenizer.from_pretrained(version)
@ -424,7 +426,7 @@ def download_clip():

#---------------------------------------------
def download_gfpgan():
print('Installing models from RealESRGAN and facexlib...',end='')
print('Installing models from RealESRGAN and facexlib (ignore deprecation errors)...',end='')
try:
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
@ -442,7 +444,7 @@ def download_gfpgan():
print('Error loading ESRGAN:')
print(traceback.format_exc())

print('Loading models from GFPGAN')
print('Loading models from GFPGAN...',end='')
for model in (
[
'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth',
@ -489,22 +491,23 @@ def download_clipseg():
import zipfile
try:
model_url = 'https://owncloud.gwdg.de/index.php/s/ioHbRzFx6th32hn/download'
model_dest = 'src/clipseg/clipseg_weights.zip'
weights_dir = 'src/clipseg/weights'
if not os.path.exists(weights_dir):
model_dest = 'models/clipseg/clipseg_weights'
weights_zip = 'models/clipseg/weights.zip'

if not os.path.exists(model_dest):
os.makedirs(os.path.dirname(model_dest), exist_ok=True)
if not os.path.exists('src/clipseg/weights/rd64-uni-refined.pth'):
request.urlretrieve(model_url,model_dest)
with zipfile.ZipFile(model_dest,'r') as zip:
zip.extractall('src/clipseg')
os.rename('src/clipseg/clipseg_weights','src/clipseg/weights')
os.remove(model_dest)
from clipseg_models.clipseg import CLIPDensePredT
if not os.path.exists(f'{model_dest}/rd64-uni-refined.pth'):
request.urlretrieve(model_url,weights_zip)
with zipfile.ZipFile(weights_zip,'r') as zip:
zip.extractall('models/clipseg')
os.remove(weights_zip)

from clipseg.clipseg import CLIPDensePredT
model = CLIPDensePredT(version='ViT-B/16', reduce_dim=64, )
model.eval()
model.load_state_dict(
torch.load(
'src/clipseg/weights/rd64-uni-refined.pth',
'models/clipseg/clipseg_weights/rd64-uni-refined.pth',
map_location=torch.device('cpu')
),
strict=False,
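Given the path relocation above, a quick post-install check that the clipseg weights landed in their new home - a minimal sketch:

```bash
ls -lh models/clipseg/clipseg_weights/rd64-uni.pth \
       models/clipseg/clipseg_weights/rd64-uni-refined.pth
```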
3
setup.py
@ -3,7 +3,7 @@ from setuptools import setup, find_packages
setup(
name='invoke-ai',
version='2.1.3',
description='',
description='InvokeAI text to image generation toolkit',
packages=find_packages(),
install_requires=[
'torch',
@ -11,3 +11,4 @@ setup(
'tqdm',
],
)
23
source_installer/create_installers.sh
Executable file
@ -0,0 +1,23 @@
#!/bin/bash

cd "$(dirname "${BASH_SOURCE[0]}")"

# make the installer zip for linux and mac
rm -rf invokeAI
mkdir -p invokeAI
cp install.sh invokeAI
cp readme.txt invokeAI

zip -r invokeAI-src-installer-linux.zip invokeAI
zip -r invokeAI-src-installer-mac.zip invokeAI

# make the installer zip for windows
rm -rf invokeAI
mkdir -p invokeAI
cp install.bat invokeAI
cp readme.txt invokeAI
cp WinLongPathsEnabled.reg invokeAI

zip -r invokeAI-src-installer-windows.zip invokeAI

echo "The installer zips are ready to be distributed.."
118
source_installer/install.bat
Normal file
@ -0,0 +1,118 @@
@echo off

@rem This script will install git and conda (if not found on the PATH variable)
@rem using micromamba (an 8mb static-linked single-file binary, conda replacement).
@rem For users who already have git and conda, this step will be skipped.

@rem Next, it'll checkout the project's git repo, if necessary.
@rem Finally, it'll create the conda environment and preload the models.

@rem This enables a user to install this project without manually installing conda and git.

echo "Installing InvokeAI.."
echo.

@rem config
set MAMBA_ROOT_PREFIX=%cd%\installer_files\mamba
set INSTALL_ENV_DIR=%cd%\installer_files\env
set MICROMAMBA_DOWNLOAD_URL=https://github.com/cmdr2/stable-diffusion-ui/releases/download/v1.1/micromamba.exe
set REPO_URL=https://github.com/invoke-ai/InvokeAI.git
set umamba_exists=F
@rem Change the download URL to an InvokeAI repo's release URL

@rem figure out whether git and conda needs to be installed
if exist "%INSTALL_ENV_DIR%" set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%

set PACKAGES_TO_INSTALL=

call conda --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% conda

call git --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" NEQ "0" set PACKAGES_TO_INSTALL=%PACKAGES_TO_INSTALL% git

call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version >.tmp1 2>.tmp2
if "%ERRORLEVEL%" EQU "0" set umamba_exists=T

@rem (if necessary) install git and conda into a contained environment
if "%PACKAGES_TO_INSTALL%" NEQ "" (
@rem download micromamba
if "%umamba_exists%" == "F" (
echo "Downloading micromamba from %MICROMAMBA_DOWNLOAD_URL% to %MAMBA_ROOT_PREFIX%\micromamba.exe"

mkdir "%MAMBA_ROOT_PREFIX%"
call curl -L "%MICROMAMBA_DOWNLOAD_URL%" > "%MAMBA_ROOT_PREFIX%\micromamba.exe"

@rem test the mamba binary
echo Micromamba version:
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" --version
)

@rem create the installer env
if not exist "%INSTALL_ENV_DIR%" (
call "%MAMBA_ROOT_PREFIX%\micromamba.exe" create -y --prefix "%INSTALL_ENV_DIR%"
)

echo "Packages to install:%PACKAGES_TO_INSTALL%"

call "%MAMBA_ROOT_PREFIX%\micromamba.exe" install -y --prefix "%INSTALL_ENV_DIR%" -c conda-forge %PACKAGES_TO_INSTALL%

if not exist "%INSTALL_ENV_DIR%" (
echo "There was a problem while installing%PACKAGES_TO_INSTALL% using micromamba. Cannot continue."
pause
exit /b
)
)

set PATH=%INSTALL_ENV_DIR%;%INSTALL_ENV_DIR%\Library\bin;%INSTALL_ENV_DIR%\Scripts;%INSTALL_ENV_DIR%\Library\usr\bin;%PATH%

@rem get the repo (and load into the current directory)
if not exist ".git" (
call git init
call git config --local init.defaultBranch main
call git remote add origin %REPO_URL%
call git fetch
call git checkout origin/main -ft
)

@rem activate the base env
call conda activate

@rem create the environment
call conda env remove -n invokeai
copy environments-and-requirements\environment-win-cuda.yml environment.yml
call conda env create
if "%ERRORLEVEL%" NEQ "0" (
echo ""
echo "Something went wrong while installing Python libraries and cannot continue.
echo "Please visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
echo "installation methods."
echo "Press any key to continue"
pause
exit /b
)

copy source_installer\invoke.bat invoke.bat
copy source_installer\update.bat update.bat

call conda activate invokeai
@rem preload the models
call python scripts\preload_models.py
if "%ERRORLEVEL%" NEQ "0" (
echo ""
echo "The preload_models.py script crashed or was cancelled."
echo "InvokeAI is not ready to run. To run preload_models.py again,"
echo "run the command 'update.bat' in this directory."
echo "Press any key to continue"
pause
exit /b
)

@rem tell the user their next steps
echo ""
echo "* InvokeAI installed successfully *"
echo "You can now start generating images by double-clicking the 'invoke.bat' file (inside this folder)
echo "Press any key to continue"
pause
exit 0
138
source_installer/install.sh
Executable file
@ -0,0 +1,138 @@
#!/bin/bash

# This script will install git and conda (if not found on the PATH variable)
# using micromamba (an 8mb static-linked single-file binary, conda replacement).
# For users who already have git and conda, this step will be skipped.

# Next, it'll checkout the project's git repo, if necessary.
# Finally, it'll create the conda environment and preload the models.

# This enables a user to install this project without manually installing conda and git.

cd "$(dirname "${BASH_SOURCE[0]}")"

echo "Installing InvokeAI.."
echo ""

OS_NAME=$(uname -s)
case "${OS_NAME}" in
Linux*) OS_NAME="linux";;
Darwin*) OS_NAME="mac";;
*) echo "Unknown OS: $OS_NAME! This script runs only on Linux or Mac" && exit
esac

OS_ARCH=$(uname -m)
case "${OS_ARCH}" in
x86_64*) OS_ARCH="64";;
arm64*) OS_ARCH="arm64";;
*) echo "Unknown system architecture: $OS_ARCH! This script runs only on x86_64 or arm64" && exit
esac

# https://mamba.readthedocs.io/en/latest/installation.html
if [ "$OS_NAME" == "linux" ] && [ "$OS_ARCH" == "arm64" ]; then OS_ARCH="aarch64"; fi

# config
export MAMBA_ROOT_PREFIX="$(pwd)/installer_files/mamba"
INSTALL_ENV_DIR="$(pwd)/installer_files/env"
MICROMAMBA_DOWNLOAD_URL="https://micro.mamba.pm/api/micromamba/${OS_NAME}-${OS_ARCH}/latest"
REPO_URL="https://github.com/invoke-ai/InvokeAI.git"
umamba_exists="F"

# figure out whether git and conda needs to be installed
if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi

PACKAGES_TO_INSTALL=""
if ! $(which conda) -V &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL conda"; fi
if ! which git &>/dev/null; then PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL git"; fi

if "$MAMBA_ROOT_PREFIX/micromamba" --version &>/dev/null; then umamba_exists="T"; fi

# (if necessary) install git and conda into a contained environment
if [ "$PACKAGES_TO_INSTALL" != "" ]; then
# download micromamba
if [ "$umamba_exists" == "F" ]; then
echo "Downloading micromamba from $MICROMAMBA_DOWNLOAD_URL to $MAMBA_ROOT_PREFIX/micromamba"

mkdir -p "$MAMBA_ROOT_PREFIX"
curl -L "$MICROMAMBA_DOWNLOAD_URL" | tar -xvj bin/micromamba -O > "$MAMBA_ROOT_PREFIX/micromamba"

chmod u+x "$MAMBA_ROOT_PREFIX/micromamba"

# test the mamba binary
echo "Micromamba version:"
"$MAMBA_ROOT_PREFIX/micromamba" --version
fi

# create the installer env
if [ ! -e "$INSTALL_ENV_DIR" ]; then
"$MAMBA_ROOT_PREFIX/micromamba" create -y --prefix "$INSTALL_ENV_DIR"
fi

echo "Packages to install:$PACKAGES_TO_INSTALL"

"$MAMBA_ROOT_PREFIX/micromamba" install -y --prefix "$INSTALL_ENV_DIR" -c conda-forge $PACKAGES_TO_INSTALL

if [ ! -e "$INSTALL_ENV_DIR" ]; then
echo "There was a problem while initializing micromamba. Cannot continue."
exit
fi
fi

if [ -e "$INSTALL_ENV_DIR" ]; then export PATH="$INSTALL_ENV_DIR/bin:$PATH"; fi

# get the repo (and load into the current directory)
if [ ! -e ".git" ]; then
git init
git config --local init.defaultBranch main
git remote add origin "$REPO_URL"
git fetch
git checkout origin/main -ft
fi

# create the environment
CONDA_BASEPATH=$(conda info --base)
source "$CONDA_BASEPATH/etc/profile.d/conda.sh" # otherwise conda complains about 'shell not initialized' (needed when running in a script)

conda activate
if [ "$OS_NAME" == "mac" ]; then
echo "Macintosh system detected. Installing MPS and CPU support."
ln -sf environments-and-requirements/environment-mac.yml environment.yml
else
if (lsmod | grep amdgpu) &>/dev/null ; then
echo "Linux system with AMD GPU driver detected. Installing ROCm and CPU support"
ln -sf environments-and-requirements/environment-lin-amd.yml environment.yml
else
echo "Linux system detected. Installing CUDA and CPU support."
ln -sf environments-and-requirements/environment-lin-cuda.yml environment.yml
fi
fi
conda env update

status=$?

if test $status -ne 0
then
echo "Something went wrong while installing Python libraries and cannot continue."
echo "Please visit https://invoke-ai.github.io/InvokeAI/#installation for alternative"
echo "installation methods"
else
ln -sf ./source_installer/invoke.sh .
ln -sf ./source_installer/update.sh .

conda activate invokeai
# preload the models
echo "Calling the preload_models.py script"
python scripts/preload_models.py
status=$?
if test $status -ne 0
then
echo "The preload_models.py script crashed or was cancelled."
echo "InvokeAI is not ready to run. Try again by running"
echo "update.sh in this directory."
else
# tell the user their next steps
echo "You can now start generating images by running invoke.sh (inside this folder), using ./invoke.sh"
fi
fi

conda activate invokeai
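The branch above decides which environment file gets linked in; after the installer finishes, the selection can be confirmed directly - a minimal sketch:

```bash
readlink environment.yml
# e.g. environments-and-requirements/environment-lin-cuda.yml
```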
@ -23,6 +23,7 @@ if [ "$0" != "bash" ]; then
* ) echo "Invalid selection"; exit;;
esac
else # in developer console
which python
python --version
echo "Press ^D to exit"
export PS1="(InvokeAI) \u@\h \w> "
fi
16
source_installer/readme.txt
Normal file
@ -0,0 +1,16 @@
InvokeAI

Project homepage: https://github.com/invoke-ai/InvokeAI

Installation on Windows:
You may need to enable Windows Long Paths to install InvokeAI. If you're not
sure what this is, you almost certainly need to do this. Simply double-click the
"WinLongPathsEnabled.reg" file located in this directory, and approve the Windows
warnings. Note that you will need to have admin privileges in order to do this.

Then double-click the 'install.bat' file (while keeping it inside the invokeAI folder).

Installation on Linux and Mac:
Please open the terminal, and run './install.sh' (while keeping it inside the invokeAI folder).

After installation, please run the 'invoke.bat' file (on Windows) or 'invoke.sh' file (on Linux/Mac) to start InvokeAI.
@ -22,3 +22,5 @@ case "${OS_NAME}" in
esac

python scripts/preload_models.py