mirror of https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00

commit bc515e243b

Merge remote-tracking branch 'origin/main' into dev/diffusers

# Conflicts:
#   environments-and-requirements/environment-lin-amd.yml
#   environments-and-requirements/environment-lin-cuda.yml
#   environments-and-requirements/environment-win-cuda.yml
#   environments-and-requirements/requirements-base.txt
#   ldm/invoke/generator/img2img.py
#   ldm/invoke/generator/inpaint.py
#   ldm/invoke/generator/txt2img2img.py
.dockerignore

@@ -1,16 +1,13 @@
 *
 !backend
+!environments-and-requirements
 !frontend
-!binary_installer
 !ldm
 !main.py
 !scripts
 !server
 !static
 !setup.py
-!docker-build
-!docs
-docker-build/Dockerfile

 # Guard against pulling in any models that might exist in the directory tree
 **/*.pt*
@@ -19,8 +16,4 @@ docker-build/Dockerfile
 !configs
 configs/models.yaml

-# unignore environment dirs/files, but ignore the environment.yml file or symlink in case it exists
-!environment*
-environment.yml
-
 **/__pycache__
.github/workflows/build-cloud-img.yml (20 lines changed)

@@ -1,15 +1,15 @@
 name: Build and push cloud image
 on:
   workflow_dispatch:
-  push:
-    branches:
-      - main
-    tags:
-      - v*
-  # we will NOT push the image on pull requests, only test buildability.
-  pull_request:
-    branches:
-      - main
+  # push:
+  #   branches:
+  #     - main
+  #   tags:
+  #     - v*
+  # # we will NOT push the image on pull requests, only test buildability.
+  # pull_request:
+  #   branches:
+  #     - main

 permissions:
   contents: read
@@ -82,6 +82,6 @@ jobs:
           file: docker-build/Dockerfile.cloud
           platforms: Linux/${{ matrix.arch }}
           # do not push the image on PRs
-          push: ${{ github.event_name != 'pull_request' }}
+          push: false
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
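With the `push` and `pull_request` triggers commented out, the cloud image is now only built when the workflow is dispatched by hand. A sketch of doing that with the GitHub CLI (assumes an authenticated `gh`; the branch name is an example):

```bash
# manually start the cloud-image workflow on the current repository
gh workflow run build-cloud-img.yml --ref main
```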
.github/workflows/lint-frontend.yml (new file, 28 lines)

@@ -0,0 +1,28 @@
+name: Lint frontend
+
+on:
+  pull_request:
+    paths:
+      - 'frontend/**'
+  push:
+    paths:
+      - 'frontend/**'
+
+defaults:
+  run:
+    working-directory: frontend
+
+jobs:
+  lint-frontend:
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Setup Node 18
+        uses: actions/setup-node@v3
+        with:
+          node-version: '18'
+      - uses: actions/checkout@v3
+      - run: 'yarn install --frozen-lockfile'
+      - run: 'yarn tsc'
+      - run: 'yarn run madge'
+      - run: 'yarn run lint --max-warnings=0'
+      - run: 'yarn run prettier --check'
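The same checks can be reproduced locally before opening a pull request; a rough equivalent of the workflow steps, assuming Node 18 and yarn are already installed:

```bash
# reproduce the lint-frontend workflow steps locally (sketch)
cd frontend
yarn install --frozen-lockfile   # same lockfile-strict install as CI
yarn tsc                         # type check
yarn run madge                   # circular-dependency check
yarn run lint --max-warnings=0   # lint, failing on any warning
yarn run prettier --check        # formatting check
```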
.github/workflows/pyflakes.yml (new file, 19 lines)

@@ -0,0 +1,19 @@
+on:
+  pull_request:
+  push:
+    branches:
+      - main
+      - development
+      - 'release-candidate-*'
+
+jobs:
+  pyflakes:
+    name: runner / pyflakes
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: pyflakes
+        uses: reviewdog/action-pyflakes@v1
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          reporter: github-pr-review
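The reviewdog action only reports pyflakes findings on the pull request; the checker itself can also be run locally. A sketch (the target paths are examples, not taken from the workflow):

```bash
# run pyflakes by hand over the Python sources (paths are illustrative)
pip install pyflakes
pyflakes ldm scripts
```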
README.md

@@ -51,7 +51,7 @@ requests. Be sure to use the provided templates. They will help us diagnose issu
 For full installation and upgrade instructions, please see:
 [InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)

-1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.2.3)
+1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/latest)
 2. Download the .zip file for your OS (Windows/macOS/Linux).
 3. Unzip the file.
 4. If you are on Windows, double-click on the `install.bat` script. On macOS, open a Terminal window, drag the file `install.sh` from Finder into the Terminal, and press return. On Linux, run `install.sh`.
@@ -167,10 +167,7 @@ To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the

 If you are unfamiliar with how
 to contribute to GitHub projects, here is a
-[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress, but for now the most
-important thing is to **make your pull request against the "development" branch**, and not against
-"main". This will help keep public breakage to a minimum and will allow you to propose more radical
-changes.
+[Getting Started Guide](https://opensource.com/article/19/7/create-pull-request-github). A full set of contribution guidelines, along with templates, are in progress. You can **make your pull request against the "main" branch**.

 We hope you enjoy using our software as much as we enjoy creating it,
 and we hope that some of those of you who are reading this will elect
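On Linux and macOS, steps 2-4 of the list above come down to a couple of shell commands (archive and folder names are examples; use whatever the release page provides):

```bash
# unpack the downloaded release bundle and start the installer
# (file and directory names are illustrative, not fixed)
unzip InvokeAI-installer.zip -d invokeai-installer
cd invokeai-installer
./install.sh
```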
docker-build/Dockerfile

@@ -14,9 +14,10 @@ RUN apt-get update \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*

-# set workdir, PATH and copy sources
-WORKDIR /usr/src/app
-ENV PATH /usr/src/app/.venv/bin:$PATH
+# set WORKDIR, PATH and copy sources
+ARG WORKDIR=/usr/src/app
+WORKDIR ${WORKDIR}
+ENV PATH ${WORKDIR}/.venv/bin:$PATH
 ARG PIP_REQUIREMENTS=requirements-lin-cuda.txt
 COPY . ./environments-and-requirements/${PIP_REQUIREMENTS} ./

@@ -38,18 +39,28 @@ FROM python:3.10-slim AS runtime
 RUN apt-get update \
     && apt-get install -y \
         --no-install-recommends \
+        build-essential=12.9 \
         libgl1-mesa-glx=20.3.* \
         libglib2.0-0=2.66.* \
+        libopencv-dev=4.5.* \
+        python3-opencv=4.5.* \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*

-WORKDIR /usr/src/app
-COPY --from=builder /usr/src/app .
-
-# set Environment, Entrypoint and default CMD
+# setup environment
+ARG WORKDIR=/usr/src/app
+WORKDIR ${WORKDIR}
+COPY --from=builder ${WORKDIR} .
+ENV PATH=${WORKDIR}/.venv/bin:$PATH
 ENV INVOKEAI_ROOT /data
 ENV INVOKE_MODEL_RECONFIGURE --yes
-ENV PATH=/usr/src/app/.venv/bin:$PATH

+# Initialize patchmatch
+RUN ln -sf \
+    /usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv4.pc \
+    /usr/lib/"$(arch)"-linux-gnu/pkgconfig/opencv.pc \
+    && python3 -c "from patchmatch import patch_match"
+
+# set Entrypoint and default CMD
 ENTRYPOINT [ "python3", "scripts/invoke.py" ]
 CMD [ "--web", "--host=0.0.0.0" ]
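The new patchmatch layer in the runtime stage imports `patch_match` once at build time so its native extension is set up inside the image; the same import can be used to smoke-test a finished image. A sketch (the tag follows the naming used by `docker-build/env.sh` below and is only an example):

```bash
# check that patchmatch initializes inside the built runtime image
docker run --rm --entrypoint python3 invokeai-cuda:latest \
    -c "from patchmatch import patch_match; print('patchmatch OK')"
```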
docker-build/build.sh

@@ -1,49 +1,35 @@
 #!/usr/bin/env bash
 set -e

-# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
-# configure values by using env when executing build.sh f.e. `env ARCH=aarch64 ./build.sh`
+# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup

 source ./docker-build/env.sh \
   || echo "please execute docker-build/build.sh from repository root" \
   || exit 1

-pip_requirements=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt}
-dockerfile=${INVOKE_DOCKERFILE:-docker-build/Dockerfile}
+PIP_REQUIREMENTS=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt}
+DOCKERFILE=${INVOKE_DOCKERFILE:-docker-build/Dockerfile}

 # print the settings
 echo -e "You are using these values:\n"
-echo -e "Dockerfile:\t ${dockerfile}"
-echo -e "requirements:\t ${pip_requirements}"
-echo -e "volumename:\t ${volumename}"
-echo -e "arch:\t\t ${arch}"
-echo -e "platform:\t ${platform}"
-echo -e "invokeai_tag:\t ${invokeai_tag}\n"
+echo -e "Dockerfile:\t ${DOCKERFILE}"
+echo -e "Requirements:\t ${PIP_REQUIREMENTS}"
+echo -e "Volumename:\t ${VOLUMENAME}"
+echo -e "arch:\t\t ${ARCH}"
+echo -e "Platform:\t ${PLATFORM}"
+echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"

-if [[ -n "$(docker volume ls -f name="${volumename}" -q)" ]]; then
-  echo "Volume already exists"
-  echo
+if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
+  echo -e "Volume already exists\n"
 else
   echo -n "createing docker volume "
-  docker volume create "${volumename}"
+  docker volume create "${VOLUMENAME}"
 fi

 # Build Container
 docker build \
-  --platform="${platform}" \
-  --tag="${invokeai_tag}" \
-  --build-arg="PIP_REQUIREMENTS=${pip_requirements}" \
-  --file="${dockerfile}" \
+  --platform="${PLATFORM}" \
+  --tag="${INVOKEAI_TAG}" \
+  --build-arg="PIP_REQUIREMENTS=${PIP_REQUIREMENTS}" \
+  --file="${DOCKERFILE}" \
   .
-
-docker run \
-  --rm \
-  --platform="$platform" \
-  --name="$project_name" \
-  --hostname="$project_name" \
-  --mount="source=$volumename,target=/data" \
-  --mount="type=bind,source=$HOME/.huggingface,target=/root/.huggingface" \
-  --env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \
-  --entrypoint="python3" \
-  "${invokeai_tag}" \
-  scripts/configure_invokeai.py --yes
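The script reads its defaults from `docker-build/env.sh`, so individual values can be overridden from the environment for a one-off build, in the spirit of the old `env ARCH=aarch64 ./build.sh` comment. A sketch run from the repository root (the requirements file name is an example; pick one that exists under `environments-and-requirements/`):

```bash
# default CUDA build
./docker-build/build.sh

# variant build: override the defaults picked up by env.sh / build.sh
PIP_REQUIREMENTS=requirements-lin-amd.txt \
INVOKE_DOCKERFILE=docker-build/Dockerfile \
ARCH=aarch64 \
    ./docker-build/build.sh
```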
docker-build/env.sh

@@ -1,15 +1,9 @@
 #!/usr/bin/env bash

-project_name=${PROJECT_NAME:-invokeai}
-volumename=${VOLUMENAME:-${project_name}_data}
-arch=${ARCH:-x86_64}
-platform=${PLATFORM:-Linux/${arch}}
-invokeai_tag=${INVOKEAI_TAG:-${project_name}:${arch}}
-gpus=${GPU_FLAGS:+--gpus=${GPU_FLAGS}}
-
-export project_name
-export volumename
-export arch
-export platform
-export invokeai_tag
-export gpus
+# Variables shared by build.sh and run.sh
+REPOSITORY_NAME=${REPOSITORY_NAME:-$(basename "$(git rev-parse --show-toplevel)")}
+VOLUMENAME=${VOLUMENAME:-${REPOSITORY_NAME,,}_data}
+ARCH=${ARCH:-$(arch)}
+PLATFORM=${PLATFORM:-Linux/${ARCH}}
+CONTAINER_FLAVOR=${CONTAINER_FLAVOR:-cuda}
+INVOKEAI_TAG=${REPOSITORY_NAME,,}-${CONTAINER_FLAVOR}:${INVOKEAI_TAG:-latest}
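For a checkout cloned as `InvokeAI` on an x86_64 machine with no overrides set, the definitions above resolve roughly as follows (worked out by hand from the script, not captured output):

```bash
# REPOSITORY_NAME        -> InvokeAI              (top-level directory name from git)
# ${REPOSITORY_NAME,,}   -> invokeai              (lower-cased by the ,, expansion)
# VOLUMENAME             -> invokeai_data
# ARCH                   -> x86_64                (from `arch`)
# PLATFORM               -> Linux/x86_64
# CONTAINER_FLAVOR       -> cuda
# INVOKEAI_TAG           -> invokeai-cuda:latest
```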
docker-build/run.sh

@@ -1,21 +1,31 @@
 #!/usr/bin/env bash
 set -e

-source ./docker-build/env.sh || echo "please run from repository root" || exit 1
+# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
+# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
+
+source ./docker-build/env.sh \
+  || echo "please run from repository root" \
+  || exit 1
+
+# check if HUGGINGFACE_TOKEN is available
+# You must have accepted the terms of use for required models
+HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN:?Please set your token for Huggingface as HUGGINGFACE_TOKEN}

 echo -e "You are using these values:\n"
-echo -e "volumename:\t ${volumename}"
-echo -e "invokeai_tag:\t ${invokeai_tag}\n"
+echo -e "Volumename:\t ${VOLUMENAME}"
+echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"

 docker run \
   --interactive \
   --tty \
   --rm \
-  --platform="$platform" \
-  --name="$project_name" \
-  --hostname="$project_name" \
-  --mount="source=$volumename,target=/data" \
+  --platform="$PLATFORM" \
+  --name="${REPOSITORY_NAME,,}" \
+  --hostname="${REPOSITORY_NAME,,}" \
+  --mount="source=$VOLUMENAME,target=/data" \
+  --env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \
   --publish=9090:9090 \
   --cap-add=sys_nice \
-  $gpus \
-  "$invokeai_tag" ${1:+$@}
+  ${GPU_FLAGS:+--gpus=${GPU_FLAGS}} \
+  "$INVOKEAI_TAG" ${1:+$@}
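A usage sketch for the updated run script, executed from the repository root (the token value is a placeholder):

```bash
# run without GPUs; the web UI is published on port 9090 by the CMD baked into the image
HUGGINGFACE_TOKEN=hf_xxxxxxxxxxxxxxxx ./docker-build/run.sh

# pass GPUs through to the container (GPU_FLAGS is expanded to --gpus=<value>)
GPU_FLAGS=all HUGGINGFACE_TOKEN=hf_xxxxxxxxxxxxxxxx ./docker-build/run.sh
```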
docs/CHANGELOG.md

@@ -4,180 +4,275 @@ title: Changelog

 # :octicons-log-16: **Changelog**

+## v2.2.4 <small>(11 December 2022)</small>
+
+**the `invokeai` directory**
+
+Previously there were two directories to worry about, the directory that
+contained the InvokeAI source code and the launcher scripts, and the `invokeai`
+directory that contained the models files, embeddings, configuration and
+outputs. With the 2.2.4 release, this dual system is done away with, and
+everything, including the `invoke.bat` and `invoke.sh` launcher scripts, now
+live in a directory named `invokeai`. By default this directory is located in
+your home directory (e.g. `\Users\yourname` on Windows), but you can select
+where it goes at install time.
+
+After installation, you can delete the install directory (the one that the zip
+file creates when it unpacks). Do **not** delete or move the `invokeai`
+directory!
+
+**Initialization file `invokeai/invokeai.init`**
+
+You can place frequently-used startup options in this file, such as the default
+number of steps or your preferred sampler. To keep everything in one place, this
+file has now been moved into the `invokeai` directory and is named
+`invokeai.init`.
+
+**To update from Version 2.2.3**
+
+The easiest route is to download and unpack one of the 2.2.4 installer files.
+When it asks you for the location of the `invokeai` runtime directory, respond
+with the path to the directory that contains your 2.2.3 `invokeai`. That is, if
+`invokeai` lives at `C:\Users\fred\invokeai`, then answer with `C:\Users\fred`
+and answer "Y" when asked if you want to reuse the directory.
+
+The `update.sh` (`update.bat`) script that came with the 2.2.3 source installer
+does not know about the new directory layout and won't be fully functional.
+
+**To update to 2.2.5 (and beyond) there's now an update path**
+
+As they become available, you can update to more recent versions of InvokeAI
+using an `update.sh` (`update.bat`) script located in the `invokeai` directory.
+Running it without any arguments will install the most recent version of
+InvokeAI. Alternatively, you can get set releases by running the `update.sh`
+script with an argument in the command shell. This syntax accepts the path to
+the desired release's zip file, which you can find by clicking on the green
+"Code" button on this repository's home page.
+
+**Other 2.2.4 Improvements**
+
+- Fix InvokeAI GUI initialization by @addianto in #1687
+- fix link in documentation by @lstein in #1728
+- Fix broken link by @ShawnZhong in #1736
+- Remove reference to binary installer by @lstein in #1731
+- documentation fixes for 2.2.3 by @lstein in #1740
+- Modify installer links to point closer to the source installer by @ebr in
+  #1745
+- add documentation warning about 1650/60 cards by @lstein in #1753
+- Fix Linux source URL in installation docs by @andybearman in #1756
+- Make install instructions discoverable in readme by @damian0815 in #1752
+- typo fix by @ofirkris in #1755
+- Non-interactive model download (support HUGGINGFACE_TOKEN) by @ebr in #1578
+- fix(srcinstall): shell installer - cp scripts instead of linking by @tildebyte
+  in #1765
+- stability and usage improvements to binary & source installers by @lstein in
+  #1760
+- fix off-by-one bug in cross-attention-control by @damian0815 in #1774
+- Eventually update APP_VERSION to 2.2.3 by @spezialspezial in #1768
+- invoke script cds to its location before running by @lstein in #1805
+- Make PaperCut and VoxelArt models load again by @lstein in #1730
+- Fix --embedding_directory / --embedding_path not working by @blessedcoolant in
+  #1817
+- Clean up readme by @hipsterusername in #1820
+- Optimized Docker build with support for external working directory by @ebr in
+  #1544
+- disable pushing the cloud container by @mauwii in #1831
+- Fix docker push github action and expand with additional metadata by @ebr in
+  #1837
+- Fix Broken Link To Notebook by @VedantMadane in #1821
+- Account for flat models by @spezialspezial in #1766
+- Update invoke.bat.in isolate environment variables by @lynnewu in #1833
+- Arch Linux Specific PatchMatch Instructions & fixing conda install on linux by
+  @SammCheese in #1848
+- Make force free GPU memory work in img2img by @addianto in #1844
+- New installer by @lstein
+
+## v2.2.3 <small>(2 December 2022)</small>
+
+!!! Note
+
+    This point release removes references to the binary installer from the
+    installation guide. The binary installer is not stable at the current
+    time. First time users are encouraged to use the "source" installer as
+    described in [Installing InvokeAI with the Source Installer](installation/INSTALL_SOURCE.md)
+
+With InvokeAI 2.2, this project now provides enthusiasts and professionals a
+robust workflow solution for creating AI-generated and human facilitated
+compositions. Additional enhancements have been made as well, improving safety,
+ease of use, and installation.
+
+Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
+512x768 image (and less for smaller images), and is compatible with
+Windows/Linux/Mac (M1 & M2).
+
+You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
+introduces the main WebUI enhancement for version 2.2 -
+[The Unified Canvas](features/UNIFIED_CANVAS.md). This new workflow is the
+biggest enhancement added to the WebUI to date, and unlocks a stunning amount of
+potential for users to create and iterate on their creations. The following
+sections describe what's new for InvokeAI.
+
+## v2.2.2 <small>(30 November 2022)</small>
+
+!!! note
+
+    The binary installer is not ready for prime time. First time users are recommended to install via the "source" installer accessible through the links at the bottom of this page.
+
+With InvokeAI 2.2, this project now provides enthusiasts and professionals a
+robust workflow solution for creating AI-generated and human facilitated
+compositions. Additional enhancements have been made as well, improving safety,
+ease of use, and installation.
+
+Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
+512x768 image (and less for smaller images), and is compatible with
+Windows/Linux/Mac (M1 & M2).
+
+You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
+introduces the main WebUI enhancement for version 2.2 -
+[The Unified Canvas](https://invoke-ai.github.io/InvokeAI/features/UNIFIED_CANVAS/).
+This new workflow is the biggest enhancement added to the WebUI to date, and
+unlocks a stunning amount of potential for users to create and iterate on their
+creations. The following sections describe what's new for InvokeAI.
+
+## v2.2.0 <small>(2 December 2022)</small>
+
+With InvokeAI 2.2, this project now provides enthusiasts and professionals a
+robust workflow solution for creating AI-generated and human facilitated
+compositions. Additional enhancements have been made as well, improving safety,
+ease of use, and installation.
+
+Optimized for efficiency, InvokeAI needs only ~3.5GB of VRAM to generate a
+512x768 image (and less for smaller images), and is compatible with
+Windows/Linux/Mac (M1 & M2).
+
+You can see the [release video](https://youtu.be/hIYBfDtKaus) here, which
+introduces the main WebUI enhancement for version 2.2 -
+[The Unified Canvas](features/UNIFIED_CANVAS.md). This new workflow is the
+biggest enhancement added to the WebUI to date, and unlocks a stunning amount of
+potential for users to create and iterate on their creations. The following
+sections describe what's new for InvokeAI.
+
+## v2.1.3 <small>(13 November 2022)</small>
+
+- A choice of installer scripts that automate installation and configuration.
+  See
+  [Installation](installation/index.md).
+- A streamlined manual installation process that works for both Conda and
+  PIP-only installs. See
+  [Manual Installation](installation/INSTALL_MANUAL.md).
+- The ability to save frequently-used startup options (model to load, steps,
+  sampler, etc) in a `.invokeai` file. See
+  [Client](features/CLI.md)
+- Support for AMD GPU cards (non-CUDA) on Linux machines.
+- Multiple bugs and edge cases squashed.
+
 ## v2.1.0 <small>(2 November 2022)</small>

 - update mac instructions to use invokeai for env name by @willwillems in #1030
 - Update .gitignore by @blessedcoolant in #1040
 - reintroduce fix for m1 from #579 missing after merge by @skurovec in #1056
 - Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in #1060
 - Print out the device type which is used by @manzke in #1073
 - Hires Addition by @hipsterusername in #1063
 - fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
   @skurovec in #1081
 - Forward dream.py to invoke.py using the same interpreter, add deprecation
   warning by @db3000 in #1077
 - fix noisy images at high step counts by @lstein in #1086
 - Generalize facetool strength argument by @db3000 in #1078
 - Enable fast switching among models at the invoke> command line by @lstein in
   #1066
 - Fix Typo, committed changing ldm environment to invokeai by @jdries3 in #1095
 - Update generate.py by @unreleased in #1109
 - Update 'ldm' env to 'invokeai' in troubleshooting steps by @19wolf in #1125
 - Fixed documentation typos and resolved merge conflicts by @rupeshs in #1123
 - Fix broken doc links, fix malaprop in the project subtitle by @majick in #1131
 - Only output facetool parameters if enhancing faces by @db3000 in #1119
 - Update gitignore to ignore codeformer weights at new location by
   @spezialspezial in #1136
 - fix links to point to invoke-ai.github.io #1117 by @mauwii in #1143
 - Rework-mkdocs by @mauwii in #1144
 - add option to CLI and pngwriter that allows user to set PNG compression level
   by @lstein in #1127
 - Fix img2img DDIM index out of bound by @wfng92 in #1137
 - Fix gh actions by @mauwii in #1128
 - update mac instructions to use invokeai for env name by @willwillems in #1030
 - Update .gitignore by @blessedcoolant in #1040
 - reintroduce fix for m1 from #579 missing after merge by @skurovec in #1056
 - Update Stable_Diffusion_AI_Notebook.ipynb (Take 2) by @ChloeL19 in #1060
 - Print out the device type which is used by @manzke in #1073
 - Hires Addition by @hipsterusername in #1063
 - fix for "1 leaked semaphore objects to clean up at shutdown" on M1 by
   @skurovec in #1081
 - Forward dream.py to invoke.py using the same interpreter, add deprecation
   warning by @db3000 in #1077
 - fix noisy images at high step counts by @lstein in #1086
 - Generalize facetool strength argument by @db3000 in #1078
 - Enable fast switching among models at the invoke> command line by @lstein in
   #1066
 - Fix Typo, committed changing ldm environment to invokeai by @jdries3 in #1095
 - Fixed documentation typos and resolved merge conflicts by @rupeshs in #1123
 - Only output facetool parameters if enhancing faces by @db3000 in #1119
 - add option to CLI and pngwriter that allows user to set PNG compression level
   by @lstein in #1127
 - Fix img2img DDIM index out of bound by @wfng92 in #1137
 - Add text prompt to inpaint mask support by @lstein in #1133
 - Respect http[s] protocol when making socket.io middleware by @damian0815 in
   #976
 - WebUI: Adds Codeformer support by @psychedelicious in #1151
 - Skips normalizing prompts for web UI metadata by @psychedelicious in #1165
 - Add Asymmetric Tiling by @carson-katri in #1132
 - Web UI: Increases max CFG Scale to 200 by @psychedelicious in #1172
 - Corrects color channels in face restoration; Fixes #1167 by @psychedelicious
   in #1175
 - Flips channels using array slicing instead of using OpenCV by @psychedelicious
   in #1178
 - Fix typo in docs: s/Formally/Formerly by @noodlebox in #1176
 - fix clipseg loading problems by @lstein in #1177
 - Correct color channels in upscale using array slicing by @wfng92 in #1181
 - Web UI: Filters existing images when adding new images; Fixes #1085 by
   @psychedelicious in #1171
 - fix a number of bugs in textual inversion by @lstein in #1190
 - Improve !fetch, add !replay command by @ArDiouscuros in #882
 - Fix generation of image with s>1000 by @holstvoogd in #951
 - Web UI: Gallery improvements by @psychedelicious in #1198
 - Update CLI.md by @krummrey in #1211
 - outcropping improvements by @lstein in #1207
 - add support for loading VAE autoencoders by @lstein in #1216
 - remove duplicate fix_func for MPS by @wfng92 in #1210
 - Metadata storage and retrieval fixes by @lstein in #1204
 - nix: add shell.nix file by @Cloudef in #1170
 - Web UI: Changes vite dist asset paths to relative by @psychedelicious in #1185
 - Web UI: Removes isDisabled from PromptInput by @psychedelicious in #1187
 - Allow user to generate images with initial noise as on M1 / mps system by
   @ArDiouscuros in #981
 - feat: adding filename format template by @plucked in #968
 - Web UI: Fixes broken bundle by @psychedelicious in #1242
 - Support runwayML custom inpainting model by @lstein in #1243
 - Update IMG2IMG.md by @talitore in #1262
 - New dockerfile - including a build- and a run- script as well as a GH-Action
   by @mauwii in #1233
 - cut over from karras to model noise schedule for higher steps by @lstein in
   #1222
 - Prompt tweaks by @lstein in #1268
 - Outpainting implementation by @Kyle0654 in #1251
 - fixing aspect ratio on hires by @tjennings in #1249
 - Fix-build-container-action by @mauwii in #1274
 - handle all unicode characters by @damian0815 in #1276
 - adds models.user.yml to .gitignore by @JakeHL in #1281
 - remove debug branch, set fail-fast to false by @mauwii in #1284
 - Protect-secrets-on-pr by @mauwii in #1285
 - Web UI: Adds initial inpainting implementation by @psychedelicious in #1225
 - fix environment-mac.yml - tested on x64 and arm64 by @mauwii in #1289
 - Use proper authentication to download model by @mauwii in #1287
 - Prevent indexing error for mode RGB by @spezialspezial in #1294
 - Integrate sd-v1-5 model into test matrix (easily expandable), remove
   unecesarry caches by @mauwii in #1293
 - add --no-interactive to configure_invokeai step by @mauwii in #1302
 - 1-click installer and updater. Uses micromamba to install git and conda into a
   contained environment (if necessary) before running the normal installation
   script by @cmdr2 in #1253
 - configure_invokeai.py script downloads the weight files by @lstein in #1290

 ## v2.0.1 <small>(13 October 2022)</small>

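The update path described under v2.2.4 above can be driven either with no argument or with a release archive; a sketch of both calls, assuming the default `~/invokeai` location (the zip URL is only an illustration of the argument form):

```bash
# install the newest version of InvokeAI
~/invokeai/update.sh

# install a specific release by handing update.sh the release's zip file
~/invokeai/update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.4.zip
```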
docs/features/CONCEPTS.md

@@ -37,8 +37,9 @@ generated using the command-line client and the Stable Diffusion 1.5 model:
 You can also combine styles and concepts:

 <figure markdown>
-![](../assets/concepts/image5.png)
-<figcaption>A portrait of <alf> in <cartoona-animal> style</figcaption>
+| A portrait of <alf> in <cartoona-animal> style |
+| :--------------------------------------------------------: |
+| ![](../assets/concepts/image5.png) |
 </figure>
 ## Using a Hugging Face Concept

@@ -49,20 +50,22 @@ find out what each concept is for, you can browse the
 look at examples of what each concept produces.

 When you have an idea of a concept you wish to try, go to the command-line
-client (CLI) and type a "<" character and the beginning of the Hugging Face
-concept name you wish to load. Press the Tab key, and the CLI will show you all
-matching concepts. You can also type "<" and Tab to get a listing of all ~800
-concepts, but be prepared to scroll up to see them all! If there is more than
-one match you can continue to type and Tab until the concept is completed.
+client (CLI) and type a `<` character and the beginning of the Hugging Face
+concept name you wish to load. Press ++tab++, and the CLI will show you all
+matching concepts. You can also type `<` and hit ++tab++ to get a listing of all
+~800 concepts, but be prepared to scroll up to see them all! If there is more
+than one match you can continue to type and ++tab++ until the concept is
+completed.

-For example if you type "<x" and Tab, you'll be prompted with the
-completions:
+!!! example

-```
+    if you type in `<x` and hit ++tab++, you'll be prompted with the completions:
+
+    ```py
 <xatu2> <xatu> <xbh> <xi> <xidiversity> <xioboma> <xuna> <xyz>
 ```

-Now type "id" and press Tab. It will be autocompleted to "<xidiversity>"
+Now type `id` and press ++tab++. It will be autocompleted to `<xidiversity>`
 because this is a unique match.

 Finish your prompt and generate as usual. You may include multiple concept terms
@@ -75,10 +78,10 @@ locally (in the `models/sd-concepts-library` directory) for future use.
 Several steps happen during downloading and installation, including a scan of
 the file for malicious code. Should any errors occur, you will be warned and the
 concept will fail to load. Generation will then continue treating the trigger
-term as a normal string of characters (e.g. as literal "<ghibli-face>").
+term as a normal string of characters (e.g. as literal `<ghibli-face>`).

-Currently auto-installation of concepts is a feature only available on the
-command-line client. Support for the WebUI is a work in progress.
+You can also use `<concept-names>` in the WebGUI's prompt textbox. There is no
+autocompletion at this time.

 ## Installing your Own TI Files

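Once a concept has been pulled in, its trigger term is used inside an ordinary prompt. A sketch of a CLI session using the `<ghibli-face>` term mentioned above (the prompt text is illustrative):

```bash
# start the command-line client, then include a concept trigger in the prompt
python scripts/invoke.py
# invoke> a portrait of a smiling woman <ghibli-face>
```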
docs/features/INPAINTING.md

@@ -158,7 +158,7 @@ when filling in missing regions. It has an almost uncanny ability to blend the
 new regions with existing ones in a semantically coherent way.

 To install the inpainting model, follow the
-[instructions](../installation/INSTALLING_MODELS.md) for installing a new model.
+[instructions](../installation/050_INSTALLING_MODELS.md) for installing a new model.
 You may use either the CLI (`invoke.py` script) or directly edit the
 `configs/models.yaml` configuration file to do this. The main thing to watch out
 for is that the the model `config` option must be set up to use
223
docs/index.md
223
docs/index.md
@ -6,15 +6,14 @@ title: Home
|
|||||||
The Docs you find here (/docs/*) are built and deployed via mkdocs. If you want to run a local version to verify your changes, it's as simple as::
|
The Docs you find here (/docs/*) are built and deployed via mkdocs. If you want to run a local version to verify your changes, it's as simple as::
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install -r requirements-mkdocs.txt
|
pip install -r docs/requirements-mkdocs.txt
|
||||||
mkdocs serve
|
mkdocs serve
|
||||||
```
|
```
|
||||||
-->
|
-->
|
||||||
|
|
||||||
<div align="center" markdown>
|
<div align="center" markdown>
|
||||||
|
|
||||||
# ^^**InvokeAI: A Stable Diffusion Toolkit**^^ :tools: <br> <small>Formerly known as lstein/stable-diffusion</small>
|
[![project logo](assets/invoke_ai_banner.png)](https://github.com/invoke-ai/InvokeAI)
|
||||||
|
|
||||||
[![project logo](assets/logo.png)](https://github.com/invoke-ai/InvokeAI)
|
|
||||||
|
|
||||||
[![discord badge]][discord link]
|
[![discord badge]][discord link]
|
||||||
|
|
||||||
@ -70,7 +69,11 @@ image-to-image generator. It provides a streamlined process with various new
|
|||||||
features and options to aid the image generation process. It runs on Windows,
|
features and options to aid the image generation process. It runs on Windows,
|
||||||
Mac and Linux machines, and runs on GPU cards with as little as 4 GB or RAM.
|
Mac and Linux machines, and runs on GPU cards with as little as 4 GB or RAM.
|
||||||
|
|
||||||
**Quick links**: [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>] [<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas & Q&A</a>]
|
**Quick links**: [<a href="https://discord.gg/ZmtBAhwWhy">Discord Server</a>]
|
||||||
|
[<a href="https://github.com/invoke-ai/InvokeAI/">Code and Downloads</a>] [<a
|
||||||
|
href="https://github.com/invoke-ai/InvokeAI/issues">Bug Reports</a>] [<a
|
||||||
|
href="https://github.com/invoke-ai/InvokeAI/discussions">Discussion, Ideas &
|
||||||
|
Q&A</a>]
|
||||||
|
|
||||||
<div align="center"><img src="assets/invoke-web-server-1.png" width=640></div>
|
<div align="center"><img src="assets/invoke-web-server-1.png" width=640></div>
|
||||||
|
|
||||||
@ -80,20 +83,19 @@ Mac and Linux machines, and runs on GPU cards with as little as 4 GB or RAM.
|
|||||||
|
|
||||||
## :octicons-package-dependencies-24: Installation
|
## :octicons-package-dependencies-24: Installation
|
||||||
|
|
||||||
This fork is supported across Linux, Windows and Macintosh. Linux
|
This fork is supported across Linux, Windows and Macintosh. Linux users can use
|
||||||
users can use either an Nvidia-based card (with CUDA support) or an
|
either an Nvidia-based card (with CUDA support) or an AMD card (using the ROCm
|
||||||
AMD card (using the ROCm driver).
|
driver).
|
||||||
|
|
||||||
First time users, please see [Automated
|
First time users, please see
|
||||||
Installer](installation/INSTALL_AUTOMATED.md) for a walkthrough of
|
[Automated Installer](installation/INSTALL_AUTOMATED.md) for a walkthrough of
|
||||||
getting InvokeAI up and running on your system. For alternative
|
getting InvokeAI up and running on your system. For alternative installation and
|
||||||
installation and upgrade instructions, please see: [InvokeAI
|
upgrade instructions, please see:
|
||||||
Installation Overview](installation/)
|
[InvokeAI Installation Overview](installation/)
|
||||||
|
|
||||||
Linux users who wish to make use of the PyPatchMatch inpainting
|
Linux users who wish to make use of the PyPatchMatch inpainting functions will
|
||||||
functions will need to perform a bit of extra work to enable this
|
need to perform a bit of extra work to enable this module. Instructions can be
|
||||||
module. Instructions can be found at [Installing
|
found at [Installing PyPatchMatch](installation/060_INSTALL_PATCHMATCH.md).
|
||||||
PyPatchMatch](installation/INSTALL_PATCHMATCH.md).
|
|
||||||
|
|
||||||
## :fontawesome-solid-computer: Hardware Requirements
|
## :fontawesome-solid-computer: Hardware Requirements
|
||||||
|
|
||||||
@ -102,12 +104,13 @@ PyPatchMatch](installation/INSTALL_PATCHMATCH.md).
|
|||||||
You wil need one of the following:
|
You wil need one of the following:
|
||||||
|
|
||||||
- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
|
- :simple-nvidia: An NVIDIA-based graphics card with 4 GB or more VRAM memory.
|
||||||
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux only)
|
- :simple-amd: An AMD-based graphics card with 4 GB or more VRAM memory (Linux
|
||||||
|
only)
|
||||||
- :fontawesome-brands-apple: An Apple computer with an M1 chip.
|
- :fontawesome-brands-apple: An Apple computer with an M1 chip.
|
||||||
|
|
||||||
We do **not recommend** the following video cards due to issues with
|
We do **not recommend** the following video cards due to issues with their
|
||||||
their running in half-precision mode and having insufficient VRAM to
|
running in half-precision mode and having insufficient VRAM to render 512x512
|
||||||
render 512x512 images in full-precision mode:
|
images in full-precision mode:
|
||||||
|
|
||||||
- NVIDIA 10xx series cards such as the 1080ti
|
- NVIDIA 10xx series cards such as the 1080ti
|
||||||
- GTX 1650 series cards
|
- GTX 1650 series cards
|
||||||
@ -131,18 +134,18 @@ render 512x512 images in full-precision mode:
|
|||||||
```bash
|
```bash
|
||||||
(invokeai) ~/InvokeAI$ python scripts/invoke.py --full_precision
|
(invokeai) ~/InvokeAI$ python scripts/invoke.py --full_precision
|
||||||
```
|
```
|
||||||
|
|
||||||
## :octicons-gift-24: InvokeAI Features
|
## :octicons-gift-24: InvokeAI Features
|
||||||
|
|
||||||
- [The InvokeAI Web Interface](features/WEB.md)
|
- [The InvokeAI Web Interface](features/WEB.md) -
|
||||||
- [WebGUI hotkey reference guide](features/WEBUIHOTKEYS.md)
|
[WebGUI hotkey reference guide](features/WEBUIHOTKEYS.md) -
|
||||||
- [WebGUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
|
[WebGUI Unified Canvas for Img2Img, inpainting and outpainting](features/UNIFIED_CANVAS.md)
|
||||||
<!-- seperator -->
|
<!-- seperator -->
|
||||||
- [The Command Line Interace](features/CLI.md)
|
- [The Command Line Interace](features/CLI.md) -
|
||||||
- [Image2Image](features/IMG2IMG.md)
|
[Image2Image](features/IMG2IMG.md) - [Inpainting](features/INPAINTING.md) -
|
||||||
- [Inpainting](features/INPAINTING.md)
|
[Outpainting](features/OUTPAINTING.md) -
|
||||||
- [Outpainting](features/OUTPAINTING.md)
|
[Adding custom styles and subjects](features/CONCEPTS.md) -
|
||||||
- [Adding custom styles and subjects](features/CONCEPTS.md)
|
[Upscaling and Face Reconstruction](features/POSTPROCESS.md)
|
||||||
- [Upscaling and Face Reconstruction](features/POSTPROCESS.md)
|
|
||||||
<!-- seperator -->
|
<!-- seperator -->
|
||||||
- [Generating Variations](features/VARIATIONS.md)
|
- [Generating Variations](features/VARIATIONS.md)
|
||||||
<!-- seperator -->
|
<!-- seperator -->
|
||||||
@ -155,99 +158,91 @@ render 512x512 images in full-precision mode:
|
|||||||
|
|
||||||
## :octicons-log-16: Latest Changes
|
## :octicons-log-16: Latest Changes
|
||||||
|
|
||||||
### v2.1.3 <small>(13 November 2022)</small>
|
### v2.2.4 <small>(11 December 2022)</small>
|
||||||
|
|
||||||
- A choice of installer scripts that automate installation and configuration. See [Installation](https://github.com/invoke-ai/InvokeAI/blob/2.1.3-rc6/docs/installation/INSTALL.md).
|
#### the `invokeai` directory
|
||||||
- A streamlined manual installation process that works for both Conda and PIP-only installs. See [Manual Installation](https://github.com/invoke-ai/InvokeAI/blob/2.1.3-rc6/docs/installation/INSTALL_MANUAL.md).
|
|
||||||
- The ability to save frequently-used startup options (model to load, steps, sampler, etc) in a `.invokeai` file. See [Client](https://github.com/invoke-ai/InvokeAI/blob/2.1.3-rc6/docs/features/CLI.md)
|
|
||||||
- Support for AMD GPU cards (non-CUDA) on Linux machines.
|
|
||||||
- Multiple bugs and edge cases squashed.
|
|
||||||
|
|
||||||
### v2.1.0 <small>(2 November 2022)</small>
|
Previously there were two directories to worry about, the directory that
|
||||||
|
contained the InvokeAI source code and the launcher scripts, and the `invokeai`
|
||||||
|
directory that contained the models files, embeddings, configuration and
|
||||||
|
outputs. With the 2.2.4 release, this dual system is done away with, and
|
||||||
|
everything, including the `invoke.bat` and `invoke.sh` launcher scripts, now
|
||||||
|
live in a directory named `invokeai`. By default this directory is located in
|
||||||
|
your home directory (e.g. `\Users\yourname` on Windows), but you can select
|
||||||
|
where it goes at install time.
|
||||||
|
|
||||||
- [Inpainting](https://invoke-ai.github.io/InvokeAI/features/INPAINTING/)
|
After installation, you can delete the install directory (the one that the zip
|
||||||
support in the WebGUI
|
file creates when it unpacks). Do **not** delete or move the `invokeai`
|
||||||
- Greatly improved navigation and user experience in the
|
directory!
|
||||||
[WebGUI](https://invoke-ai.github.io/InvokeAI/features/WEB/)
|
|
||||||
- The prompt syntax has been enhanced with
|
|
||||||
[prompt weighting, cross-attention and prompt merging](https://invoke-ai.github.io/InvokeAI/features/PROMPTS/).
|
|
||||||
- You can now load
|
|
||||||
[multiple models and switch among them quickly](https://docs.google.com/presentation/d/1WywGA1rny7bpFh7CLSdTr4nNpVKdlUeT0Bj0jCsILyU/edit?usp=sharing)
|
|
||||||
without leaving the CLI.
|
|
||||||
- The installation process (via `scripts/configure_invokeai.py`) now lets you select
|
|
||||||
among several popular
|
|
||||||
[Stable Diffusion models](https://invoke-ai.github.io/InvokeAI/installation/INSTALLING_MODELS/)
|
|
||||||
and downloads and installs them on your behalf. Among other models, this
|
|
||||||
script will install the current Stable Diffusion 1.5 model as well as a
|
|
||||||
StabilityAI variable autoencoder (VAE) which improves face generation.
|
|
||||||
- Tired of struggling with photo editors to get the masked region for
|
|
||||||
inpainting just right? Let the AI make the mask for you using
|
|
||||||
[text masking](https://docs.google.com/presentation/d/1pWoY510hCVjz0M6X9CBbTznZgW2W5BYNKrmZm7B45q8/edit#slide=id.p).
|
|
||||||
This feature allows you to specify the part of the image to paint over using
|
|
||||||
just English-language phrases.
|
|
||||||
- Tired of seeing the head of your subjects cropped off? Uncrop them in the CLI
|
|
||||||
with the
|
|
||||||
[outcrop feature](https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/#outcrop).
|
|
||||||
- Tired of seeing your subject's bodies duplicated or mangled when generating
|
|
||||||
larger-dimension images? Check out the `--hires` option in the CLI, or select
|
|
||||||
the corresponding toggle in the WebGUI.
|
|
||||||
- We now support textual inversion and fine-tune .bin styles and subjects from
|
|
||||||
the Hugging Face archive of
|
|
||||||
[SD Concepts](https://huggingface.co/sd-concepts-library). Load the .bin file
|
|
||||||
using the `--embedding_path` option. (The next version will support merging
|
|
||||||
and loading of multiple simultaneous models).
|
|
||||||
- ...
|
|
||||||
|
|
||||||
### v2.0.1 <small>(13 October 2022)</small>
|
##### Initialization file `invokeai/invokeai.init`
|
||||||
|
|
||||||
- fix noisy images at high step count when using k\* samplers
|
You can place frequently-used startup options in this file, such as the default
|
||||||
- dream.py script now calls invoke.py module directly rather than via a new
|
number of steps or your preferred sampler. To keep everything in one place, this
|
||||||
python process (which could break the environment)
|
file has now been moved into the `invokeai` directory and is named
|
||||||
|
`invokeai.init`.
|
||||||
|
|
||||||
### v2.0.0 <small>(9 October 2022)</small>
|
#### To update from Version 2.2.3
|
||||||
|
|
||||||
- `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains for
|
The easiest route is to download and unpack one of the 2.2.4 installer files.
|
||||||
backward compatibility.
|
When it asks you for the location of the `invokeai` runtime directory, respond
|
||||||
- Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
|
with the path to the directory that contains your 2.2.3 `invokeai`. That is, if
|
||||||
- Support for
|
`invokeai` lives at `C:\Users\fred\invokeai`, then answer with `C:\Users\fred`
|
||||||
<a href="https://invoke-ai.github.io/InvokeAI/features/INPAINTING/">inpainting</a>
|
and answer "Y" when asked if you want to reuse the directory.
|
||||||
and
|
|
||||||
<a href="https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/">outpainting</a>
|
The `update.sh` (`update.bat`) script that came with the 2.2.3 source installer
|
||||||
- img2img runs on all k\* samplers
|
does not know about the new directory layout and won't be fully functional.
|
||||||
- Support for
|
|
||||||
<a href="https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts">negative
|
#### To update to 2.2.5 (and beyond) there's now an update path.
|
||||||
prompts</a>
|
|
||||||
- Support for CodeFormer face reconstruction
|
As they become available, you can update to more recent versions of InvokeAI
|
||||||
- Support for Textual Inversion on Macintoshes
|
using an `update.sh` (`update.bat`) script located in the `invokeai` directory.
|
||||||
- Support in both WebGUI and CLI for
|
Running it without any arguments will install the most recent version of
|
||||||
<a href="https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/">post-processing
|
InvokeAI. Alternatively, you can get set releases by running the `update.sh`
|
||||||
of previously-generated images</a> using facial reconstruction, ESRGAN
|
script with an argument in the command shell. This syntax accepts the path to
|
||||||
upscaling, outcropping (similar to DALL-E infinite canvas), and "embiggen"
|
the desired release's zip file, which you can find by clicking on the green
|
||||||
upscaling. See the `!fix` command.
|
"Code" button on this repository's home page.
|
||||||
- New `--hires` option on `invoke>` line allows
|
|
||||||
<a href="https://invoke-ai.github.io/InvokeAI/features/CLI/#txt2img">larger
|
#### Other 2.2.4 Improvements
|
||||||
images to be created without duplicating elements</a>, at the cost of some
|
|
||||||
performance.
|
- Fix InvokeAI GUI initialization by @addianto in #1687
|
||||||
- New `--perlin` and `--threshold` options allow you to add and control
|
- fix link in documentation by @lstein in #1728
|
||||||
variation during image generation (see
|
- Fix broken link by @ShawnZhong in #1736
|
||||||
<a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding
|
- Remove reference to binary installer by @lstein in #1731
|
||||||
and Perlin Noise Initialization</a>
|
- documentation fixes for 2.2.3 by @lstein in #1740
|
||||||
- Extensive metadata now written into PNG files, allowing reliable regeneration
|
- Modify installer links to point closer to the source installer by @ebr in
|
||||||
of images and tweaking of previous settings.
|
#1745
|
||||||
- Command-line completion in `invoke.py` now works on Windows, Linux and Mac
|
- add documentation warning about 1650/60 cards by @lstein in #1753
|
||||||
platforms.
|
- Fix Linux source URL in installation docs by @andybearman in #1756
|
||||||
- Improved
|
- Make install instructions discoverable in readme by @damian0815 in #1752
|
||||||
<a href="https://invoke-ai.github.io/InvokeAI/features/CLI/">command-line
|
- typo fix by @ofirkris in #1755
|
||||||
completion behavior</a>. New commands added:
|
- Non-interactive model download (support HUGGINGFACE_TOKEN) by @ebr in #1578
|
||||||
- List command-line history with `!history`
|
- fix(srcinstall): shell installer - cp scripts instead of linking by @tildebyte
|
||||||
- Search command-line history with `!search`
|
in #1765
|
||||||
- Clear history with `!clear`
|
- stability and usage improvements to binary & source installers by @lstein in
|
||||||
- Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
|
#1760
|
||||||
configure. To switch away from auto use the new flag like
|
- fix off-by-one bug in cross-attention-control by @damian0815 in #1774
|
||||||
`--precision=float32`.
|
- Eventually update APP_VERSION to 2.2.3 by @spezialspezial in #1768
|
||||||
|
- invoke script cds to its location before running by @lstein in #1805
|
||||||
|
- Make PaperCut and VoxelArt models load again by @lstein in #1730
|
||||||
|
- Fix --embedding_directory / --embedding_path not working by @blessedcoolant in
|
||||||
|
#1817
|
||||||
|
- Clean up readme by @hipsterusername in #1820
|
||||||
|
- Optimized Docker build with support for external working directory by @ebr in
|
||||||
|
#1544
|
||||||
|
- disable pushing the cloud container by @mauwii in #1831
|
||||||
|
- Fix docker push github action and expand with additional metadata by @ebr in
|
||||||
|
#1837
|
||||||
|
- Fix Broken Link To Notebook by @VedantMadane in #1821
|
||||||
|
- Account for flat models by @spezialspezial in #1766
|
||||||
|
- Update invoke.bat.in isolate environment variables by @lynnewu in #1833
|
||||||
|
- Arch Linux Specific PatchMatch Instructions & fixing conda install on linux by
|
||||||
|
@SammCheese in #1848
|
||||||
|
- Make force free GPU memory work in img2img by @addianto in #1844
|
||||||
|
- New installer by @lstein
|
||||||
|
|
||||||
For older changelogs, please visit the
|
For older changelogs, please visit the
|
||||||
**[CHANGELOG](CHANGELOG/#v114-11-september-2022)**.
|
**[CHANGELOG](CHANGELOG/#v223-2-december-2022)**.
|
||||||
|
|
||||||
## :material-target: Troubleshooting
|
## :material-target: Troubleshooting
|
||||||
|
|
||||||
|
306 docs/installation/010_INSTALL_AUTOMATED.md Normal file
@ -0,0 +1,306 @@
|
|||||||
|
---
|
||||||
|
title: Installing with the Automated Installer
|
||||||
|
---
|
||||||
|
|
||||||
|
# InvokeAI Automated Installation
|
||||||
|
|
||||||
|
## Introduction
|
||||||
|
|
||||||
|
The automated installer is a shell script that attempts to automate every step
|
||||||
|
needed to install and run InvokeAI on a stock computer running recent versions
|
||||||
|
of Linux, MacOS or Windows. It will leave you with a version that runs a stable
|
||||||
|
version of InvokeAI with the option to upgrade to experimental versions later.
|
||||||
|
|
||||||
|
## Walk through
|
||||||
|
|
||||||
|
1. Make sure that your system meets the
|
||||||
|
[hardware requirements](../index.md#hardware-requirements) and has the
|
||||||
|
appropriate GPU drivers installed. In particular, if you are a Linux user
|
||||||
|
with an AMD GPU installed, you may need to install the
|
||||||
|
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
|
||||||
|
|
||||||
|
!!! info "Required Space"
|
||||||
|
|
||||||
|
Installation requires roughly 18G of free disk space to load the libraries and
|
||||||
|
recommended model weights files.
|
||||||
|
|
||||||
|
2. Check that your system has an up-to-date Python installed. To do this, open
|
||||||
|
up a command-line window ("Terminal" on Linux and Macintosh, "Command" or
|
||||||
|
"Powershell" on Windows) and type `python --version`. If Python is
|
||||||
|
installed, it will print out the version number. If it is version `3.9.1` or
|
||||||
|
higher, you meet requirements.
|
||||||
|
|
||||||
|
!!! warning "If you see an older version, or get a command not found error"
|
||||||
|
|
||||||
|
Go to [Python Downloads](https://www.python.org/downloads/) and
|
||||||
|
download the appropriate installer package for your platform. We recommend
|
||||||
|
[Version 3.10.9](https://www.python.org/downloads/release/python-3109/),
|
||||||
|
which has been extensively tested with InvokeAI.
|
||||||
|
|
||||||
|
!!! warning "At this time we do not recommend Python 3.11"
|
||||||
|
|
||||||
|
_Please select your platform in the section below for platform-specific
|
||||||
|
setup requirements._
|
||||||
|
|
||||||
|
=== "Windows users"
|
||||||
|
|
||||||
|
- During the Python configuration process,
|
||||||
|
look out for a checkbox to add Python to your PATH
|
||||||
|
and select it. If the install script complains that it can't
|
||||||
|
find python, then open the Python installer again and choose
|
||||||
|
"Modify" existing installation.
|
||||||
|
|
||||||
|
- Installation requires an up to date version of the Microsoft Visual C libraries. Please install the 2015-2022 libraries available here: https://learn.microsoft.com/en-us/cpp/windows/deploying-native-desktop-applications-visual-cpp?view=msvc-170
|
||||||
|
|
||||||
|
=== "Mac users"
|
||||||
|
|
||||||
|
- After installing Python, you may need to run the
|
||||||
|
following command from the Terminal in order to install the Web
|
||||||
|
certificates needed to download model data from https sites. If
|
||||||
|
you see lots of CERTIFICATE ERRORS during the last part of the
|
||||||
|
install, this is the problem, and you can fix it with this command:
|
||||||
|
|
||||||
|
`/Applications/Python\ 3.10/Install\ Certificates.command`
|
||||||
|
|
||||||
|
- You may need to install the Xcode command line tools. These
|
||||||
|
are a set of tools that are needed to run certain applications in a
|
||||||
|
Terminal, including InvokeAI. This package is provided directly by Apple.
|
||||||
|
|
||||||
|
- To install, open a terminal window and run `xcode-select
|
||||||
|
--install`. You will get a macOS system popup guiding you through the
|
||||||
|
install. If you already have them installed, you will instead see some
|
||||||
|
output in the Terminal advising you that the tools are already installed.
|
||||||
|
|
||||||
|
- More information can be found here:
|
||||||
|
https://www.freecodecamp.org/news/install-xcode-command-line-tools/
|
||||||
|
|
||||||
|
=== "Linux users"
|
||||||
|
|
||||||
|
For reasons that are not entirely clear, installing the correct version of Python can be a bit of a challenge on Ubuntu, Linux Mint, and other Ubuntu-derived distributions.
|
||||||
|
|
||||||
|
In particular, Ubuntu version 20.04 LTS comes with an old version of Python, does not come with the PIP package manager installed, and to make matters worse, the `python` command points to Python2, not Python3.
|
||||||
|
|
||||||
|
Here is the quick recipe for bringing your system up to date:
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install python3.9
|
||||||
|
sudo apt install python3-pip
|
||||||
|
cd /usr/bin
|
||||||
|
sudo ln -sf python3.9 python3
|
||||||
|
sudo ln -sf python3 python
|
||||||
|
```
|
||||||
|
|
||||||
|
You can still access older versions of Python by calling `python2`, `python3.8`,
|
||||||
|
etc.
|
||||||
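To confirm that the links point where you expect, you can re-check the
reported versions (a quick sketch; your version numbers will vary):

```bash
python --version            # should now report a Python 3.9.x release
python3 -m pip --version    # confirms that pip is available for Python 3
```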
|
|
||||||
|
3. The source installer is distributed in ZIP files. Go to the
|
||||||
|
[latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and
|
||||||
|
look for a series of files named:
|
||||||
|
|
||||||
|
- [InvokeAI-installer-2.2.4-mac.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-mac.zip)
|
||||||
|
- [InvokeAI-installer-2.2.4-windows.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-windows.zip)
|
||||||
|
- [InvokeAI-installer-2.2.4-linux.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-linux.zip)
|
||||||
|
|
||||||
|
Download the one that is appropriate for your operating system.
|
||||||
|
|
||||||
|
4. Unpack the zip file into a convenient directory. This will create a new
|
||||||
|
directory named "InvokeAI-Installer". This example shows how this would look
|
||||||
|
using the `unzip` command-line tool, but you may use any graphical or
|
||||||
|
command-line Zip extractor:
|
||||||
|
|
||||||
|
```cmd
|
||||||
|
C:\Documents\Linco> unzip InvokeAI-installer-2.2.4-windows.zip
|
||||||
|
Archive:  C:\Linco\Downloads\InvokeAI-installer-2.2.4-windows.zip
|
||||||
|
creating: InvokeAI-Installer\
|
||||||
|
inflating: InvokeAI-Installer\install.bat
|
||||||
|
inflating: InvokeAI-Installer\readme.txt
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
After successful installation, you can delete the `InvokeAI-Installer`
|
||||||
|
directory.
|
||||||
|
|
||||||
|
5. **Windows only** Please double-click on the file WinLongPathsEnabled.reg and
|
||||||
|
accept the dialog box that asks you if you wish to modify your registry.
|
||||||
|
This activates long filename support on your system and will prevent
|
||||||
|
mysterious errors during installation.
|
||||||
|
|
||||||
|
6. If you are using a desktop GUI, double-click the installer file. It will be
|
||||||
|
named `install.bat` on Windows systems and `install.sh` on Linux and
|
||||||
|
Macintosh systems.
|
||||||
|
|
||||||
|
On Windows systems you will probably get an "Untrusted Publisher" warning.
|
||||||
|
Click on "More Info" and select "Run Anyway." You trust us, right?
|
||||||
|
|
||||||
|
7. Alternatively, from the command line, run the shell script or .bat file:
|
||||||
|
|
||||||
|
```cmd
|
||||||
|
C:\Documents\Linco> cd InvokeAI-Installer
|
||||||
|
C:\Documents\Linco\invokeAI> install.bat
|
||||||
|
```
|
||||||
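On Linux and Macintosh systems the equivalent, assuming you unpacked the
installer into your home directory, would be something like:

```bash
# hypothetical unpack location; adjust the path to wherever you extracted the zip
cd ~/InvokeAI-Installer
./install.sh
```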
|
|
||||||
|
8. The script will ask you to choose where to install InvokeAI. Select a
|
||||||
|
directory with at least 18G of free space for a full install. InvokeAI and
|
||||||
|
all its support files will be installed into a new directory named
|
||||||
|
`invokeai` located at the location you specify.
|
||||||
|
|
||||||
|
- The default is to install the `invokeai` directory in your home directory,
|
||||||
|
usually `C:\Users\YourName\invokeai` on Windows systems,
|
||||||
|
`/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai`
|
||||||
|
on Macintoshes, where "YourName" is your login name.
|
||||||
|
|
||||||
|
- The script uses tab autocompletion to suggest directory path completions.
|
||||||
|
Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly
|
||||||
|
to suggest completions.
|
||||||
|
|
||||||
|
9. Sit back and let the install script work. It will install the third-party
|
||||||
|
libraries needed by InvokeAI, then download the current InvokeAI release and
|
||||||
|
install it.
|
||||||
|
|
||||||
|
Be aware that some of the library download and install steps take a long
|
||||||
|
time. In particular, the `pytorch` package is quite large and often appears
|
||||||
|
to get "stuck" at 99.9%. Have patience and the installation step will
|
||||||
|
eventually resume. However, there are occasions when the library install
|
||||||
|
does legitimately get stuck. If you have been waiting for more than ten
|
||||||
|
minutes and nothing is happening, you can interrupt the script with ^C. You
|
||||||
|
may restart it and it will pick up where it left off.
|
||||||
|
|
||||||
|
10. After installation completes, the installer will launch a script called
|
||||||
|
`configure_invokeai.py`, which will guide you through the first-time process
|
||||||
|
of selecting one or more Stable Diffusion model weights files, downloading
|
||||||
|
and configuring them. We provide a list of popular models that InvokeAI
|
||||||
|
performs well with. However, you can add more weight files later on using
|
||||||
|
the command-line client or the Web UI. See
|
||||||
|
[Installing Models](050_INSTALLING_MODELS.md) for details.
|
||||||
|
|
||||||
|
Note that the main Stable Diffusion weights file is protected by a license
|
||||||
|
agreement that you must agree to in order to use. The script will list the
|
||||||
|
steps you need to take to create an account on the official site that hosts
|
||||||
|
the weights files, accept the agreement, and provide an access token that
|
||||||
|
allows InvokeAI to legally download and install the weights files.
|
||||||
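If you already have a Hugging Face access token, you may be able to supply it
through the environment instead of pasting it interactively; per the 2.2.4
release notes, non-interactive model download understands a `HUGGINGFACE_TOKEN`
variable. A sketch (the token value is a placeholder):

```bash
# hypothetical example -- replace the placeholder with your real token
export HUGGINGFACE_TOKEN=hf_xxxxxxxxxxxxxxxxxxxx
```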
|
|
||||||
|
If you have already downloaded the weights file(s) for another Stable
|
||||||
|
Diffusion distribution, you may skip this step (by selecting "skip" when
|
||||||
|
prompted) and configure InvokeAI to use the previously-downloaded files. The
|
||||||
|
process for this is described in [Installing Models](050_INSTALLING_MODELS.md).
|
||||||
|
|
||||||
|
11. The script will now exit and you'll be ready to generate some images. Look
|
||||||
|
for the directory `invokeai` installed in the location you chose at the
|
||||||
|
beginning of the install session. Look for a shell script named `invoke.sh`
|
||||||
|
(Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking
|
||||||
|
it or typing its name at the command-line:
|
||||||
|
|
||||||
|
```cmd
|
||||||
|
C:\Documents\Linco> cd invokeai
|
||||||
|
C:\Documents\Linco\invokeAI> invoke.bat
|
||||||
|
```
|
||||||
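On Linux or Macintosh the same thing, assuming you accepted the default
location for the runtime directory, looks roughly like:

```bash
cd ~/invokeai
./invoke.sh
```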
|
|
||||||
|
- The `invoke.bat` (`invoke.sh`) script will give you the choice of starting
|
||||||
|
(1) the command-line interface, or (2) the web GUI. If you start the
|
||||||
|
latter, you can load the user interface by pointing your browser at
|
||||||
|
http://localhost:9090.
|
||||||
|
|
||||||
|
- The script also offers you a third option labeled "open the developer
|
||||||
|
console". If you choose this option, you will be dropped into a
|
||||||
|
command-line interface in which you can run python commands directly,
|
||||||
|
access developer tools, and launch InvokeAI with customized options.
|
||||||
|
|
||||||
|
12. You can launch InvokeAI with several different command-line arguments that
|
||||||
|
customize its behavior. For example, you can change the location of the
|
||||||
|
image output directory, or select your favorite sampler. See the
|
||||||
|
[Command-Line Interface](../features/CLI.md) for a full list of the options.
|
||||||
|
|
||||||
|
- To set defaults that will take effect every time you launch InvokeAI,
|
||||||
|
use a text editor (e.g. Notepad) to edit the file
|
||||||
|
`invokeai\invokeai.init`. It contains a variety of examples that you can
|
||||||
|
follow to add and modify launch options.
|
||||||
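For instance, options that you would otherwise pass on the command line, e.g.

```bash
# hypothetical example: default to the web UI, 30 steps, and a custom output folder
invoke.py --web --steps=30 --outdir=/home/yourname/invokeai/outputs
```

can instead be placed in `invokeai.init` (one option per line) so that they
apply to every launch. The specific switches above are only illustrative.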
|
|
||||||
|
!!! warning "The `invokeai` directory contains the `invoke` application, its
|
||||||
|
configuration files, the model weight files, and outputs of image generation.
|
||||||
|
Once InvokeAI is installed, do not move or remove this directory."
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### _Package dependency conflicts_
|
||||||
|
|
||||||
|
If you have previously installed InvokeAI or another Stable Diffusion package,
|
||||||
|
the installer may occasionally pick up outdated libraries and either the
|
||||||
|
installer or `invoke` will fail with complaints about library conflicts. You can
|
||||||
|
address this by entering the `invokeai` directory and running `update.sh`, which
|
||||||
|
will bring InvokeAI up to date with the latest libraries.
|
||||||
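For example, assuming the default runtime location in your home directory:

```bash
cd ~/invokeai
./update.sh      # update.bat on Windows
```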
|
|
||||||
|
### ldm from pypi
|
||||||
|
|
||||||
|
!!! warning
|
||||||
|
|
||||||
|
Some users have tried to correct dependency problems by installing
|
||||||
|
the `ldm` package from PyPi.org. Unfortunately this is an unrelated package that
|
||||||
|
has nothing to do with the 'latent diffusion model' used by InvokeAI. Installing
|
||||||
|
ldm will make matters worse. If you've installed ldm, uninstall it with
|
||||||
|
`pip uninstall ldm`.
|
||||||
|
|
||||||
|
### Corrupted configuration file
|
||||||
|
|
||||||
|
Everything seems to install ok, but `invoke` complains of a corrupted
|
||||||
|
configuration file and goes back into the configuration process (asking you to
|
||||||
|
download models, etc), but this doesn't fix the problem.
|
||||||
|
|
||||||
|
This issue is often caused by a misconfigured configuration directive in the
|
||||||
|
`invokeai\invokeai.init` initialization file that contains startup settings. The
|
||||||
|
easiest way to fix the problem is to move the file out of the way and re-run
|
||||||
|
`configure_invokeai.py`. Enter the developer's console (option 3 of the launcher
|
||||||
|
script) and run this command:
|
||||||
|
|
||||||
|
```cmd
|
||||||
|
configure_invokeai.py --root=.
|
||||||
|
```
|
||||||
|
|
||||||
|
Note the dot (.) after `--root`. It is part of the command.
|
||||||
|
|
||||||
|
_If none of these maneuvers fixes the problem_ then please report the problem to
|
||||||
|
the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
|
||||||
|
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
|
||||||
|
assistance.
|
||||||
|
|
||||||
|
### other problems
|
||||||
|
|
||||||
|
If you run into problems during or after installation, the InvokeAI team is
|
||||||
|
available to help you. Either create an
|
||||||
|
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
|
||||||
|
make a request for help on the "bugs-and-support" channel of our
|
||||||
|
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
|
||||||
|
organization, but typically somebody will be available to help you within 24
|
||||||
|
hours, and often much sooner.
|
||||||
|
|
||||||
|
## Updating to newer versions
|
||||||
|
|
||||||
|
This distribution is changing rapidly, and we add new features on a daily basis.
|
||||||
|
To update to the latest released version (recommended), run the `update.sh`
|
||||||
|
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
|
||||||
|
release and re-run the `configure_invokeai` script to download any updated
|
||||||
|
models files that may be needed. You can also use this to add additional models
|
||||||
|
that you did not select at installation time.
|
||||||
|
|
||||||
|
You can now close the developer console and run `invoke` as before. If you get
|
||||||
|
complaints about missing models, then you may need to do the additional step of
|
||||||
|
running `configure_invokeai.py`. This happens relatively infrequently. To do
|
||||||
|
this, simply open up the developer's console again and type
|
||||||
|
`python scripts/configure_invokeai.py`.
|
||||||
|
|
||||||
|
You may also use the `update` script to install any selected version of
|
||||||
|
InvokeAI. From https://github.com/invoke-ai/InvokeAI, navigate to the zip file
|
||||||
|
link of the version you wish to install. You can find the zip links by going to
|
||||||
|
the one of the release pages and looking for the **Assets** section at the
|
||||||
|
bottom. Alternatively, you can browse "branches" and "tags" at the top of the
|
||||||
|
big code directory on the InvokeAI welcome page. When you find the version you
|
||||||
|
want to install, go to the green "<> Code" button at the top, and copy the
|
||||||
|
"Download ZIP" link.
|
||||||
|
|
||||||
|
Now run `update.sh` (or `update.bat`) with the URL of the desired InvokeAI
|
||||||
|
version as its argument. For example, this will install the old 2.2.0 release.
|
||||||
|
|
||||||
|
```cmd
|
||||||
|
update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.0.zip
|
||||||
|
```
|
||||||
|
|
579 docs/installation/020_INSTALL_MANUAL.md Normal file
@ -0,0 +1,579 @@
|
|||||||
|
---
|
||||||
|
title: Installing Manually
|
||||||
|
---
|
||||||
|
|
||||||
|
<figure markdown>
|
||||||
|
# :fontawesome-brands-linux: Linux | :fontawesome-brands-apple: macOS | :fontawesome-brands-windows: Windows
|
||||||
|
</figure>
|
||||||
|
|
||||||
|
!!! warning "This is for advanced Users"
|
||||||
|
|
||||||
|
who are already experienced with using conda or pip
|
||||||
|
|
||||||
|
## Introduction
|
||||||
|
|
||||||
|
You have two choices for manual installation, the [first
|
||||||
|
one](#PIP_method) uses basic Python virtual environment (`venv`)
|
||||||
|
commands and the PIP package manager. The [second one](#Conda_method)
|
||||||
|
is based on the Anaconda3 package manager (`conda`). Both methods require
|
||||||
|
you to enter commands on the terminal, also known as the "console".
|
||||||
|
|
||||||
|
Note that the conda install method is currently deprecated and will no
longer be supported at some point in the future.
|
||||||
|
|
||||||
|
On Windows systems you are encouraged to install and use the
|
||||||
|
[Powershell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
|
||||||
|
which provides compatibility with Linux and Mac shells and nice
|
||||||
|
features such as command-line completion.
|
||||||
|
|
||||||
|
## pip Install
|
||||||
|
|
||||||
|
To install InvokeAI with virtual environments and the PIP package
|
||||||
|
manager, please follow these steps:
|
||||||
|
|
||||||
|
1. Make sure you are using Python 3.9 or 3.10. The rest of the install
|
||||||
|
procedure depends on this:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python -V
|
||||||
|
```
|
||||||
|
|
||||||
|
2. From within the InvokeAI top-level directory, create and activate a virtual
|
||||||
|
environment named `invokeai`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python -mvenv invokeai
|
||||||
|
source invokeai/bin/activate
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Make sure that pip is installed in your virtual environment and up to date:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python -mensurepip --upgrade
|
||||||
|
python -mpip install --upgrade pip
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Pick the correct `requirements*.txt` file for your hardware and operating
|
||||||
|
system.
|
||||||
|
|
||||||
|
We have created a series of environment files suited for different operating
|
||||||
|
systems and GPU hardware. They are located in the
|
||||||
|
`environments-and-requirements` directory:
|
||||||
|
|
||||||
|
<figure markdown>
|
||||||
|
|
||||||
|
| filename | OS |
|
||||||
|
| :---------------------------------: | :-------------------------------------------------------------: |
|
||||||
|
| requirements-lin-amd.txt | Linux with an AMD (ROCm) GPU |
|
||||||
|
| requirements-lin-arm64.txt | Linux running on arm64 systems |
|
||||||
|
| requirements-lin-cuda.txt | Linux with an NVIDIA (CUDA) GPU |
|
||||||
|
| requirements-mac-mps-cpu.txt | Macintoshes with MPS acceleration |
|
||||||
|
| requirements-lin-win-colab-cuda.txt | Windows with an NVIDIA (CUDA) GPU<br>(supports Google Colab too) |
|
||||||
|
|
||||||
|
</figure>
|
||||||
|
|
||||||
|
Select the appropriate requirements file, and make a link to it from
|
||||||
|
`requirements.txt` in the top-level InvokeAI directory. The command to do
|
||||||
|
this from the top-level directory is:
|
||||||
|
|
||||||
|
!!! example ""
|
||||||
|
|
||||||
|
=== "Macintosh and Linux"
|
||||||
|
|
||||||
|
!!! info "Replace `xxx` and `yyy` with the appropriate OS and GPU codes."
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ln -sf environments-and-requirements/requirements-xxx-yyy.txt requirements.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Windows"
|
||||||
|
|
||||||
|
!!! info "on Windows, admin privileges are required to make links, so we use the copy command instead"
|
||||||
|
|
||||||
|
```cmd
|
||||||
|
copy environments-and-requirements\requirements-lin-win-colab-cuda.txt requirements.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
!!! warning
|
||||||
|
|
||||||
|
Please do not link or copy `environments-and-requirements/requirements-base.txt`.
|
||||||
|
This is a base requirements file that does not have the platform-specific
|
||||||
|
libraries. Also, be sure to link or copy the platform-specific file to
|
||||||
|
a top-level file named `requirements.txt` as shown here. Running pip on
|
||||||
|
a requirements file in a subdirectory will not work as expected.
|
||||||
|
|
||||||
|
When this is done, confirm that a file named `requirements.txt` has been
|
||||||
|
created in the InvokeAI root directory and that it points to the correct
|
||||||
|
file in `environments-and-requirements`.
|
||||||
|
|
||||||
|
5. Run PIP
|
||||||
|
|
||||||
|
Be sure that the `invokeai` environment is active before doing this:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install --prefer-binary -r requirements.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
6. Set up the runtime directory
|
||||||
|
|
||||||
|
In this step you will initialize a runtime directory that will
|
||||||
|
contain the models, model config files, directory for textual
|
||||||
|
inversion embeddings, and your outputs. This keeps the runtime
|
||||||
|
directory separate from the source code and aids in updating.
|
||||||
|
|
||||||
|
You may pick any location for this directory using the `--root_dir`
|
||||||
|
option (abbreviated --root). If you don't pass this option, it will
|
||||||
|
default to `invokeai` in your home directory.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
configure_invokeai.py --root_dir ~/Programs/invokeai
|
||||||
|
```
|
||||||
|
|
||||||
|
The script `configure_invokeai.py` will interactively guide you through the
|
||||||
|
process of downloading and installing the weights files needed for InvokeAI.
|
||||||
|
Note that the main Stable Diffusion weights file is protected by a license
|
||||||
|
agreement that you have to agree to. The script will list the steps you need
|
||||||
|
to take to create an account on the site that hosts the weights files,
|
||||||
|
accept the agreement, and provide an access token that allows InvokeAI to
|
||||||
|
legally download and install the weights files.
|
||||||
|
|
||||||
|
If you get an error message about a module not being installed, check that
|
||||||
|
the `invokeai` environment is active and if not, repeat step 5.
|
||||||
|
|
||||||
|
Note that `configure_invokeai.py` and `invoke.py` should be installed
|
||||||
|
under your virtual environment directory and the system should find them
|
||||||
|
on the PATH. If this isn't working on your system, you can call the
scripts directly using `python scripts/configure_invokeai.py` and
|
||||||
|
`python scripts/invoke.py`.
|
||||||
|
|
||||||
|
!!! tip
|
||||||
|
|
||||||
|
If you have already downloaded the weights file(s) for another Stable
|
||||||
|
Diffusion distribution, you may skip this step (by selecting "skip" when
|
||||||
|
prompted) and configure InvokeAI to use the previously-downloaded files. The
|
||||||
|
process for this is described in [here](050_INSTALLING_MODELS.md).
|
||||||
|
|
||||||
|
7. Run the command-line- or the web- interface:
|
||||||
|
|
||||||
|
Activate the environment (with `source invokeai/bin/activate`), and then
|
||||||
|
run the script `invoke.py`. If you selected a non-default location
|
||||||
|
for the runtime directory, please specify the path with the `--root_dir`
|
||||||
|
option (abbreviated below as `--root`):
|
||||||
|
|
||||||
|
!!! example ""
|
||||||
|
|
||||||
|
!!! warning "Make sure that the virtual environment is activated, which should create `(invokeai)` in front of your prompt!"
|
||||||
|
|
||||||
|
=== "CLI"
|
||||||
|
|
||||||
|
```bash
|
||||||
|
invoke.py --root ~/Programs/invokeai
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "local Webserver"
|
||||||
|
|
||||||
|
```bash
|
||||||
|
invoke.py --web --root ~/Programs/invokeai
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Public Webserver"
|
||||||
|
|
||||||
|
```bash
|
||||||
|
invoke.py --web --host 0.0.0.0 --root ~/Programs/invokeai
|
||||||
|
```
|
||||||
|
|
||||||
|
If you choose to run the web interface, point your browser at
|
||||||
|
http://localhost:9090 in order to load the GUI.
|
||||||
|
|
||||||
|
!!! tip
|
||||||
|
|
||||||
|
You can permanently set the location of the runtime directory by setting the environment variable INVOKEAI_ROOT to the path of the directory.
|
||||||
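For example, bash users could add a line like the following to `~/.bashrc`
(adjust the path to wherever you created the runtime directory):

```bash
export INVOKEAI_ROOT=~/Programs/invokeai
```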
|
|
||||||
|
8. Render away!
|
||||||
|
|
||||||
|
Browse the [features](../features/CLI.md) section to learn about all the things you
|
||||||
|
can do with InvokeAI.
|
||||||
|
|
||||||
|
Note that some GPUs are slow to warm up. In particular, when using an AMD
|
||||||
|
card with the ROCm driver, you may have to wait for over a minute the first
|
||||||
|
time you try to generate an image. Fortunately, after the warm up period
|
||||||
|
rendering will be fast.
|
||||||
|
|
||||||
|
9. Subsequently, to relaunch the script, be sure to activate the `invokeai`
virtual environment (e.g. `source invokeai/bin/activate`), enter the `InvokeAI`
directory, and then launch the `invoke.py` script. If you forget to activate the
virtual environment, the script will fail with multiple `ModuleNotFound` errors.
|
||||||
|
|
||||||
|
!!! tip
|
||||||
|
|
||||||
|
Do not move the source code repository after installation. The virtual environment directory has absolute paths in it that get confused if the directory is moved.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Conda method
|
||||||
|
|
||||||
|
1. Check that your system meets the
|
||||||
|
[hardware requirements](index.md#Hardware_Requirements) and has the
|
||||||
|
appropriate GPU drivers installed. In particular, if you are a Linux user
|
||||||
|
with an AMD GPU installed, you may need to install the
|
||||||
|
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
|
||||||
|
|
||||||
|
InvokeAI does not yet support Windows machines with AMD GPUs due to the lack
|
||||||
|
of ROCm driver support on this platform.
|
||||||
|
|
||||||
|
To confirm that the appropriate drivers are installed, run `nvidia-smi` on
|
||||||
|
NVIDIA/CUDA systems, and `rocm-smi` on AMD systems. These should return
|
||||||
|
information about the installed video card.
|
||||||
|
|
||||||
|
Macintosh users with MPS acceleration, or anybody with a CPU-only system,
|
||||||
|
can skip this step.
|
||||||
|
|
||||||
|
2. You will need to install Anaconda3 and Git if they are not already
|
||||||
|
available. Use your operating system's preferred package manager, or
|
||||||
|
download the installers manually. You can find them here:
|
||||||
|
|
||||||
|
- [Anaconda3](https://www.anaconda.com/)
|
||||||
|
- [git](https://git-scm.com/downloads)
|
||||||
|
|
||||||
|
3. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from
|
||||||
|
GitHub:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git clone https://github.com/invoke-ai/InvokeAI.git
|
||||||
|
```
|
||||||
|
|
||||||
|
This will create an InvokeAI folder where you will follow the rest of the
|
||||||
|
steps.
|
||||||
|
|
||||||
|
4. Enter the newly-created InvokeAI folder:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd InvokeAI
|
||||||
|
```
|
||||||
|
|
||||||
|
From this step forward make sure that you are working in the InvokeAI
|
||||||
|
directory!
|
||||||
|
|
||||||
|
5. Select the appropriate environment file:
|
||||||
|
|
||||||
|
We have created a series of environment files suited for different operating
|
||||||
|
systems and GPU hardware. They are located in the
|
||||||
|
`environments-and-requirements` directory:
|
||||||
|
|
||||||
|
<figure markdown>
|
||||||
|
|
||||||
|
| filename | OS |
|
||||||
|
| :----------------------: | :----------------------------: |
|
||||||
|
| environment-lin-amd.yml | Linux with an AMD (ROCm) GPU |
|
||||||
|
| environment-lin-cuda.yml | Linux with an NVIDIA CUDA GPU |
|
||||||
|
| environment-mac.yml | Macintosh |
|
||||||
|
| environment-win-cuda.yml | Windows with an NVIDIA CUDA GPU |
|
||||||
|
|
||||||
|
</figure>
|
||||||
|
|
||||||
|
Choose the appropriate environment file for your system and link or copy it
|
||||||
|
to `environment.yml` in InvokeAI's top-level directory. To do so, run
|
||||||
|
the following command from the repository root:
|
||||||
|
|
||||||
|
!!! Example ""
|
||||||
|
|
||||||
|
=== "Macintosh and Linux"
|
||||||
|
|
||||||
|
!!! todo "Replace `xxx` and `yyy` with the appropriate OS and GPU codes as seen in the table above"
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ln -sf environments-and-requirements/environment-xxx-yyy.yml environment.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
When this is done, confirm that a file `environment.yml` has been linked in
|
||||||
|
the InvokeAI root directory and that it points to the correct file in the
|
||||||
|
`environments-and-requirements`.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ls -la
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Windows"
|
||||||
|
|
||||||
|
!!! todo " Since it requires admin privileges to create links, we will use the copy command to create your `environment.yml`"
|
||||||
|
|
||||||
|
```cmd
|
||||||
|
copy environments-and-requirements\environment-win-cuda.yml environment.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
Afterwards verify that the file `environment.yml` has been created, either via the
|
||||||
|
explorer or by using the command `dir` from the terminal
|
||||||
|
|
||||||
|
```cmd
|
||||||
|
dir
|
||||||
|
```
|
||||||
|
|
||||||
|
!!! warning "Do not try to run conda on directly on the subdirectory environments file. This won't work. Instead, copy or link it to the top-level directory as shown."
|
||||||
|
|
||||||
|
6. Create the conda environment:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
conda env update
|
||||||
|
```
|
||||||
|
|
||||||
|
This will create a new environment named `invokeai` and install all InvokeAI
|
||||||
|
dependencies into it. If something goes wrong you should take a look at
|
||||||
|
[troubleshooting](#troubleshooting).
|
||||||
|
|
||||||
|
7. Activate the `invokeai` environment:
|
||||||
|
|
||||||
|
In order to use the newly created environment you will first need to
|
||||||
|
activate it
|
||||||
|
|
||||||
|
```bash
|
||||||
|
conda activate invokeai
|
||||||
|
```
|
||||||
|
|
||||||
|
Your command-line prompt should change to indicate that `invokeai` is active
|
||||||
|
by prepending `(invokeai)`.
|
||||||
|
|
||||||
|
8. Set up the runtime directory
|
||||||
|
|
||||||
|
In this step you will initialize a runtime directory that will
|
||||||
|
contain the models, model config files, directory for textual
|
||||||
|
inversion embeddings, and your outputs. This keeps the runtime
|
||||||
|
directory separate from the source code and aids in updating.
|
||||||
|
|
||||||
|
You may pick any location for this directory using the `--root_dir`
|
||||||
|
option (abbreviated --root). If you don't pass this option, it will
|
||||||
|
default to `invokeai` in your home directory.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python scripts/configure_invokeai.py --root_dir ~/Programs/invokeai
|
||||||
|
```
|
||||||
|
|
||||||
|
The script `configure_invokeai.py` will interactively guide you through the
|
||||||
|
process of downloading and installing the weights files needed for InvokeAI.
|
||||||
|
Note that the main Stable Diffusion weights file is protected by a license
|
||||||
|
agreement that you have to agree to. The script will list the steps you need
|
||||||
|
to take to create an account on the site that hosts the weights files,
|
||||||
|
accept the agreement, and provide an access token that allows InvokeAI to
|
||||||
|
legally download and install the weights files.
|
||||||
|
|
||||||
|
If you get an error message about a module not being installed, check that
|
||||||
|
the `invokeai` environment is active and if not, repeat step 5.
|
||||||
|
|
||||||
|
Note that `configure_invokeai.py` and `invoke.py` should be
|
||||||
|
installed under your conda directory and the system should find
|
||||||
|
them automatically on the PATH. If this isn't working on your
|
||||||
|
system, you can call the scripts directly using `python
scripts/configure_invokeai.py` and `python scripts/invoke.py`.
|
||||||
|
|
||||||
|
!!! tip
|
||||||
|
|
||||||
|
If you have already downloaded the weights file(s) for another Stable
|
||||||
|
Diffusion distribution, you may skip this step (by selecting "skip" when
|
||||||
|
prompted) and configure InvokeAI to use the previously-downloaded files. The
|
||||||
|
process for this is described in [here](050_INSTALLING_MODELS.md).
|
||||||
|
|
||||||
|
9. Run the command-line- or the web- interface:
|
||||||
|
|
||||||
|
Activate the environment (with `source invokeai/bin/activate`), and then
|
||||||
|
run the script `invoke.py`. If you selected a non-default location
|
||||||
|
for the runtime directory, please specify the path with the `--root_dir`
|
||||||
|
option (abbreviated below as `--root`):
|
||||||
|
|
||||||
|
!!! example ""
|
||||||
|
|
||||||
|
!!! warning "Make sure that the conda environment is activated, which should create `(invokeai)` in front of your prompt!"
|
||||||
|
|
||||||
|
=== "CLI"
|
||||||
|
|
||||||
|
```bash
|
||||||
|
invoke.py --root ~/Programs/invokeai
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "local Webserver"
|
||||||
|
|
||||||
|
```bash
|
||||||
|
invoke.py --web --root ~/Programs/invokeai
|
||||||
|
```
|
||||||
|
|
||||||
|
=== "Public Webserver"
|
||||||
|
|
||||||
|
```bash
|
||||||
|
invoke.py --web --host 0.0.0.0 --root ~/Programs/invokeai
|
||||||
|
```
|
||||||
|
|
||||||
|
If you choose to run the web interface, point your browser at
|
||||||
|
http://localhost:9090 in order to load the GUI.
|
||||||
|
|
||||||
|
!!! tip
|
||||||
|
|
||||||
|
You can permanently set the location of the runtime directory by setting the environment variable INVOKEAI_ROOT to the path of your choice.
|
||||||
|
|
||||||
|
10. Render away!
|
||||||
|
|
||||||
|
Browse the [features](../features/CLI.md) section to learn about all the things you
|
||||||
|
can do with InvokeAI.
|
||||||
|
|
||||||
|
Note that some GPUs are slow to warm up. In particular, when using an AMD
|
||||||
|
card with the ROCm driver, you may have to wait for over a minute the first
|
||||||
|
time you try to generate an image. Fortunately, after the warm up period
|
||||||
|
rendering will be fast.
|
||||||
|
|
||||||
|
11. Subsequently, to relaunch the script, be sure to run "conda activate
|
||||||
|
invokeai", enter the `InvokeAI` directory, and then launch the invoke
|
||||||
|
script. If you forget to activate the 'invokeai' environment, the script
|
||||||
|
will fail with multiple `ModuleNotFound` errors.
|
||||||
|
|
||||||
|
## Creating an "install" version of InvokeAI
|
||||||
|
|
||||||
|
If you wish you can install InvokeAI and all its dependencies in the
|
||||||
|
runtime directory. This allows you to delete the source code
|
||||||
|
repository and eliminates the need to provide `--root_dir` at startup
|
||||||
|
time. Note that this method only works with the PIP method.
|
||||||
|
|
||||||
|
1. Follow the instructions for the PIP install, but in step #2 put the
|
||||||
|
virtual environment into the runtime directory. For example, assuming the
|
||||||
|
runtime directory lives in `~/Programs/invokeai`, you'd run:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python -mvenv ~/Programs/invokeai
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Now follow steps 3 to 5 in the PIP recipe, ending with the `pip install`
|
||||||
|
step.
|
||||||
|
|
||||||
|
3. Run one additional step while you are in the source code repository
|
||||||
|
directory `pip install .` (note the dot at the end).
|
||||||
|
|
||||||
|
4. That's all! Now, whenever you activate the virtual environment,
|
||||||
|
`invoke.py` will know where to look for the runtime directory without
|
||||||
|
needing a `--root_dir` argument. In addition, you can now move or
|
||||||
|
delete the source code repository entirely.
|
||||||
|
|
||||||
|
(Don't move the runtime directory!)
|
||||||
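A later session might then look like this sketch (assuming the runtime
directory from the earlier examples, `~/Programs/invokeai`):

```bash
# activate the virtual environment that now lives inside the runtime directory
source ~/Programs/invokeai/bin/activate
# no --root_dir argument needed any more
invoke.py --web
```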
|
|
||||||
|
## Updating to newer versions of the script
|
||||||
|
|
||||||
|
This distribution is changing rapidly. If you used the `git clone` method
|
||||||
|
(step 5) to download the InvokeAI directory, then to update to the latest and
|
||||||
|
greatest version, launch the Anaconda window, enter `InvokeAI` and type:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git pull
|
||||||
|
conda env update
|
||||||
|
python scripts/configure_invokeai.py --no-interactive #optional
|
||||||
|
```
|
||||||
|
|
||||||
|
This will bring your local copy into sync with the remote one. The last step may
|
||||||
|
be needed to take advantage of new features or released models. The
|
||||||
|
`--no-interactive` flag will prevent the script from prompting you to download
|
||||||
|
the big Stable Diffusion weights files.
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
Here are some common issues and their suggested solutions.
|
||||||
|
|
||||||
|
### Conda
|
||||||
|
|
||||||
|
#### Conda fails before completing `conda update`
|
||||||
|
|
||||||
|
The usual source of these errors is a package incompatibility. While we have
|
||||||
|
tried to minimize these, over time packages get updated and sometimes introduce
|
||||||
|
incompatibilities.
|
||||||
|
|
||||||
|
We suggest that you search
|
||||||
|
[Issues](https://github.com/invoke-ai/InvokeAI/issues) or the "bugs-and-support"
|
||||||
|
channel of the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).
|
||||||
|
|
||||||
|
You may also try to install the broken packages manually using PIP. To do this,
|
||||||
|
activate the `invokeai` environment, and run `pip install` with the name and
|
||||||
|
version of the package that is causing the incompatibility. For example:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip install test-tube==0.7.5
|
||||||
|
```
|
||||||
|
|
||||||
|
You can keep doing this until all requirements are satisfied and the `invoke.py`
|
||||||
|
script runs without errors. Please report to
|
||||||
|
[Issues](https://github.com/invoke-ai/InvokeAI/issues) what you were able to do
|
||||||
|
to work around the problem so that others can benefit from your investigation.
|
||||||
|
|
||||||
|
### Create Conda Environment fails on MacOS
|
||||||
|
|
||||||
|
If creating the conda environment fails with an lmdb error, this is most likely caused by Clang.
Run `brew config` to see which Clang is installed on your Mac. If Clang isn't installed, that is what is causing the error.
Start by installing the additional Xcode command line tools, followed by `brew install llvm`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
xcode-select --install
|
||||||
|
brew install llvm
|
||||||
|
```
|
||||||
|
|
||||||
|
If `brew config` shows that Clang is installed, update to the latest llvm and try creating the environment again.
|
||||||
|
|
||||||
|
#### `configure_invokeai.py` or `invoke.py` crashes at an early stage
|
||||||
|
|
||||||
|
This is usually due to an incomplete or corrupted Conda install. Make sure you
|
||||||
|
have linked to the correct environment file and run `conda update` again.
|
||||||
|
|
||||||
|
If the problem persists, a more extreme measure is to clear Conda's caches and
|
||||||
|
remove the `invokeai` environment:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
conda deactivate
|
||||||
|
conda env remove -n invokeai
|
||||||
|
conda clean -a
|
||||||
|
conda update
|
||||||
|
```
|
||||||
|
|
||||||
|
This removes all cached library files, including ones that may have been
|
||||||
|
corrupted somehow. (This is not supposed to happen, but does anyway).
|
||||||
|
|
||||||
|
#### `invoke.py` crashes at a later stage
|
||||||
|
|
||||||
|
If the CLI or web site had been working ok, but something unexpected happens
|
||||||
|
later on during the session, you've encountered a code bug that is probably
|
||||||
|
unrelated to an install issue. Please search
|
||||||
|
[Issues](https://github.com/invoke-ai/InvokeAI/issues), file a bug report, or
|
||||||
|
ask for help on [Discord](https://discord.gg/ZmtBAhwWhy)
|
||||||
|
|
||||||
|
#### My renders are running very slowly
|
||||||
|
|
||||||
|
You may have installed the wrong torch (machine learning) package, and the
|
||||||
|
system is running on CPU rather than the GPU. To check, look at the log messages
|
||||||
|
that appear when `invoke.py` is first starting up. One of the earlier lines
|
||||||
|
should say `Using device type cuda`. On AMD systems, it will also say "cuda",
|
||||||
|
and on Macintoshes, it should say "mps". If instead the message says it is
|
||||||
|
running on "cpu", then you may need to install the correct torch library.
|
||||||
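As an additional quick check on CUDA systems, you can ask torch directly
whether it can see the GPU (this one-liner is not InvokeAI-specific; run it
from the activated environment):

```bash
python -c "import torch; print(torch.cuda.is_available())"
```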
|
|
||||||
|
You may be able to fix this by installing a different torch library. Here are
|
||||||
|
the magic incantations for Conda and PIP.
|
||||||
|
|
||||||
|
!!! todo "For CUDA systems"
|
||||||
|
|
||||||
|
- conda
|
||||||
|
|
||||||
|
```bash
|
||||||
|
conda install pytorch torchvision torchaudio pytorch-cuda=11.6 -c pytorch -c nvidia
|
||||||
|
```
|
||||||
|
|
||||||
|
- pip
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
|
||||||
|
```
|
||||||
|
|
||||||
|
!!! todo "For AMD systems"
|
||||||
|
|
||||||
|
- conda
|
||||||
|
|
||||||
|
```bash
|
||||||
|
conda activate invokeai
|
||||||
|
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
|
||||||
|
```
|
||||||
|
|
||||||
|
- pip
|
||||||
|
|
||||||
|
```bash
|
||||||
|
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
|
||||||
|
```
|
||||||
|
|
||||||
|
More information and troubleshooting tips can be found at https://pytorch.org.
|
@ -1,5 +1,5 @@
|
|||||||
---
|
---
|
||||||
title: Docker
|
title: Installing with Docker
|
||||||
---
|
---
|
||||||
|
|
||||||
# :fontawesome-brands-docker: Docker
|
# :fontawesome-brands-docker: Docker
|
||||||
@ -81,11 +81,12 @@ Some Suggestions of variables you may want to change besides the Token:
|
|||||||
| Environment-Variable | Default value | Description |
|
| Environment-Variable | Default value | Description |
|
||||||
| -------------------- | ----------------------------- | -------------------------------------------------------------------------------------------- |
|
| -------------------- | ----------------------------- | -------------------------------------------------------------------------------------------- |
|
||||||
| `HUGGINGFACE_TOKEN` | No default, but **required**! | This is the only **required** variable, without it you can't download the huggingface models |
|
| `HUGGINGFACE_TOKEN` | No default, but **required**! | This is the only **required** variable, without it you can't download the huggingface models |
|
||||||
| `PROJECT_NAME` | `invokeai` | affects the project folder, tag- and volume name |
|
| `REPOSITORY_NAME` | The Basename of the Repo folder | This name will be used as the container repository/image name |
|
||||||
| `VOLUMENAME` | `${PROJECT_NAME}_data` | Name of the Docker Volume where model files will be stored |
|
| `VOLUMENAME` | `${REPOSITORY_NAME,,}_data` | Name of the Docker Volume where model files will be stored |
|
||||||
| `ARCH` | `x86_64` | can be changed to f.e. aarch64 if you are using a ARM based CPU |
|
| `ARCH` | arch of the build machine | can be changed if you want to build the image for another arch |
|
||||||
| `INVOKEAI_TAG` | `${PROJECT_NAME}:${ARCH}` | the Container Repository / Tag which will be used |
|
| `INVOKEAI_TAG` | latest | the Container Repository / Tag which will be used |
|
||||||
| `PIP_REQUIREMENTS` | `requirements-lin-cuda.txt` | the requirements file to use (from `environments-and-requirements`) |
|
| `PIP_REQUIREMENTS` | `requirements-lin-cuda.txt` | the requirements file to use (from `environments-and-requirements`) |
|
||||||
|
| `CONTAINER_FLAVOR` | cuda | the flavor of the image, which can be changed if you build f.e. with amd requirements file. |
|
||||||
| `INVOKE_DOCKERFILE` | `docker-build/Dockerfile` | the Dockerfile which should be built, handy for development |
|
| `INVOKE_DOCKERFILE` | `docker-build/Dockerfile` | the Dockerfile which should be built, handy for development |
|
||||||
|
|
||||||
</figure>
|
</figure>
|
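As a sketch only (the exact build invocation is described elsewhere in this
guide), these are ordinary environment variables, so you can override them in
the shell before building; `HUGGINGFACE_TOKEN` is the only required one and
the value below is a placeholder:

```bash
# hypothetical overrides set before running the Docker build
export HUGGINGFACE_TOKEN=hf_xxxxxxxxxxxxxxxx
export VOLUMENAME=invokeai_data
```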
The remainder of the diff removes two long documentation files and replaces each with a symbolic link to its renumbered successor. The removed content is reproduced below.

docs/installation/INSTALL_AUTOMATED.md (removed, 315 lines; recreated further down as a symbolic link):

---
title: InvokeAI Automated Installation
---

# InvokeAI Automated Installation

## Introduction

The automated installer is a shell script that attempts to automate every step needed to install and run InvokeAI on a stock computer running recent versions of Linux, MacOS or Windows. It will leave you with a version that runs a stable version of InvokeAI with the option to upgrade to experimental versions later.

## Walk through

1. Make sure that your system meets the [hardware requirements](../index.md#hardware-requirements) and has the appropriate GPU drivers installed. In particular, if you are a Linux user with an AMD GPU installed, you may need to install the [ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

    !!! info "Required Space"

        Installation requires roughly 18G of free disk space to load the libraries and recommended model weights files.

2. Check that your system has an up-to-date Python installed. To do this, open up a command-line window ("Terminal" on Linux and Macintosh, "Command" or "Powershell" on Windows) and type `python --version`. If Python is installed, it will print out the version number. If it is version `3.9.1` or higher, you meet requirements. (A quick check is sketched at the end of this step.)

    !!! warning "If you see an older version, or get a command not found error"

        Go to [Python Downloads](https://www.python.org/downloads/) and download the appropriate installer package for your platform. We recommend [Version 3.10.9](https://www.python.org/downloads/release/python-3109/), which has been extensively tested with InvokeAI.

    !!! warning "At this time we do not recommend Python 3.11"

    === "Windows users"

        - During the Python configuration process, please look out for a checkbox to add Python to your PATH and select it. If the install script complains that it can't find python, then open the Python installer again and choose "Modify" the existing installation.

        - There is a slight possibility that you will encounter DLL load errors at the very end of the installation process. This is caused by not having up-to-date Visual C++ redistributable libraries. If this happens to you, you can install the C++ libraries from this site: https://learn.microsoft.com/en-us/cpp/windows/deploying-native-desktop-applications-visual-cpp?view=msvc-170

    === "Mac users"

        - After installing Python, you may need to run the following command from the Terminal in order to install the Web certificates needed to download model data from https sites. If you see lots of CERTIFICATE ERRORS during the last part of the install, this is the problem, and you can fix it with this command: `/Applications/Python\ 3.10/Install\ Certificates.command`

        - You may need to install the Xcode command line tools. These are a set of tools that are needed to run certain applications in a Terminal, including InvokeAI. This package is provided directly by Apple.

        - To install, open a terminal window and run `xcode-select --install`. You will get a macOS system popup guiding you through the install. If you already have them installed, you will instead see some output in the Terminal advising you that the tools are already installed.

        - More information can be found here: https://www.freecodecamp.org/news/install-xcode-command-line-tools/

    === "Linux users"

        - See [Installing Python in Ubuntu](#installing-python-in-ubuntu) for some platform-specific tips.
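    A minimal sketch of the version check described above; the exact version string you see will differ:

    ```bash
    # Print the interpreter version; anything from 3.9.1 up (but not 3.11) is fine.
    python --version
    # Example output: Python 3.10.9
    ```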
3. The source installer is distributed in ZIP files. Go to the [latest release](https://github.com/invoke-ai/InvokeAI/releases/latest), and look for a series of files named:

    - [InvokeAI-installer-2.2.4-mac.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-mac.zip)
    - [InvokeAI-installer-2.2.4-windows.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-windows.zip)
    - [InvokeAI-installer-2.2.4-linux.zip](https://github.com/invoke-ai/InvokeAI/releases/latest/download/InvokeAI-installer-2.2.4-linux.zip)

    Download the one that is appropriate for your operating system.

4. Unpack the zip file into a convenient directory. This will create a new directory named "InvokeAI-Installer". This example shows how this would look using the `unzip` command-line tool, but you may use any graphical or command-line Zip extractor:

    ```cmd
    C:\Documents\Linco> unzip InvokeAI-installer-2.2.4-windows.zip
    Archive: C:\Linco\Downloads\InvokeAI-installer-2.2.4-windows.zip
      creating: InvokeAI-Installer\
     inflating: InvokeAI-Installer\install.bat
     inflating: InvokeAI-Installer\readme.txt
    ...
    ```

    After successful installation, you can delete the `InvokeAI-Installer` directory.

5. **Windows only** Please double-click on the file WinLongPathsEnabled.reg and accept the dialog box that asks you if you wish to modify your registry. This activates long filename support on your system and will prevent mysterious errors during installation.

6. If you are using a desktop GUI, double-click the installer file. It will be named `install.bat` on Windows systems and `install.sh` on Linux and Macintosh systems.

    On Windows systems you will probably get an "Untrusted Publisher" warning. Click on "More Info" and select "Run Anyway." You trust us, right?

7. Alternatively, from the command line, run the shell script or .bat file:

    ```cmd
    C:\Documents\Linco> cd InvokeAI-Installer
    C:\Documents\Linco\InvokeAI-Installer> install.bat
    ```

8. The script will ask you to choose where to install InvokeAI. Select a directory with at least 18G of free space for a full install. InvokeAI and all its support files will be installed into a new directory named `invokeai` located at the location you specify.

    - The default is to install the `invokeai` directory in your home directory, usually `C:\Users\YourName\invokeai` on Windows systems, `/home/YourName/invokeai` on Linux systems, and `/Users/YourName/invokeai` on Macintoshes, where "YourName" is your login name.

    - The script uses tab autocompletion to suggest directory path completions. Type part of the path (e.g. "C:\Users") and press ++tab++ repeatedly to suggest completions.

9. Sit back and let the install script work. It will install the third-party libraries needed by InvokeAI, then download the current InvokeAI release and install it.

    Be aware that some of the library download and install steps take a long time. In particular, the `pytorch` package is quite large and often appears to get "stuck" at 99.9%. Have patience and the installation step will eventually resume. However, there are occasions when the library install does legitimately get stuck. If you have been waiting for more than ten minutes and nothing is happening, you can interrupt the script with ^C. You may restart it and it will pick up where it left off.

10. After installation completes, the installer will launch a script called `configure_invokeai.py`, which will guide you through the first-time process of selecting one or more Stable Diffusion model weights files, downloading and configuring them. We provide a list of popular models that InvokeAI performs well with. However, you can add more weight files later on using the command-line client or the Web UI. See [Installing Models](INSTALLING_MODELS.md) for details.

    Note that the main Stable Diffusion weights file is protected by a license agreement that you must agree to in order to use. The script will list the steps you need to take to create an account on the official site that hosts the weights files, accept the agreement, and provide an access token that allows InvokeAI to legally download and install the weights files.

    If you have already downloaded the weights file(s) for another Stable Diffusion distribution, you may skip this step (by selecting "skip" when prompted) and configure InvokeAI to use the previously-downloaded files. The process for this is described in [Installing Models](INSTALLING_MODELS.md).

11. The script will now exit and you'll be ready to generate some images. Look for the directory `invokeai` installed in the location you chose at the beginning of the install session. Look for a shell script named `invoke.sh` (Linux/Mac) or `invoke.bat` (Windows). Launch the script by double-clicking it or typing its name at the command-line:

    ```cmd
    C:\Documents\Linco> cd invokeai
    C:\Documents\Linco\invokeai> invoke.bat
    ```

    - The `invoke.bat` (`invoke.sh`) script will give you the choice of starting (1) the command-line interface, or (2) the web GUI. If you start the latter, you can load the user interface by pointing your browser at http://localhost:9090.

    - The script also offers you a third option labeled "open the developer console". If you choose this option, you will be dropped into a command-line interface in which you can run python commands directly, access developer tools, and launch InvokeAI with customized options.

12. You can launch InvokeAI with several different command-line arguments that customize its behavior. For example, you can change the location of the image output directory, or select your favorite sampler. See the [Command-Line Interface](../features/CLI.md) documentation for a full list of the options; a sketch of such a launch appears at the end of this step.

    - To set defaults that will take effect every time you launch InvokeAI, use a text editor (e.g. Notepad) to edit the file `invokeai\invokeai.init`. It contains a variety of examples that you can follow to add and modify launch options.
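    A minimal sketch, run from the developer console or an activated environment. Only `--web` and `--max_load_models` appear elsewhere in these docs; treat `--outdir` as an illustrative assumption and check `python scripts/invoke.py --help` for the authoritative flag list in your release:

    ```bash
    # Launch the web UI, keep several models resident, and write images to a
    # custom output directory. Verify flag names against the CLI documentation.
    python scripts/invoke.py --web --max_load_models=3 --outdir=/home/YourName/invokeai/outputs
    ```

    The same switches can be added to `invokeai\invokeai.init` so that they apply on every launch.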
!!! warning "The `invokeai` directory contains the `invoke` application, its
|
|
||||||
configuration files, the model weight files, and outputs of image generation.
|
|
||||||
Once InvokeAI is installed, do not move or remove this directory."
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
### _Package dependency conflicts_
|
|
||||||
|
|
||||||
If you have previously installed InvokeAI or another Stable Diffusion package,
|
|
||||||
the installer may occasionally pick up outdated libraries and either the
|
|
||||||
installer or `invoke` will fail with complaints about library conflicts. You can
|
|
||||||
address this by entering the `invokeai` directory and running `update.sh`, which
|
|
||||||
will bring InvokeAI up to date with the latest libraries.
|
|
||||||
|
|
||||||
### ldm from pypi
|
|
||||||
|
|
||||||
!!! warning
|
|
||||||
|
|
||||||
Some users have tried to correct dependency problems by installing
|
|
||||||
the `ldm` package from PyPi.org. Unfortunately this is an unrelated package that
|
|
||||||
has nothing to do with the 'latent diffusion model' used by InvokeAI. Installing
|
|
||||||
ldm will make matters worse. If you've installed ldm, uninstall it with
|
|
||||||
`pip uninstall ldm`.
|
|
||||||
|
|
||||||
### Corrupted configuration file
|
|
||||||
|
|
||||||
Everything seems to install ok, but `invoke` complains of a corrupted
|
|
||||||
configuration file and goes back into the configuration process (asking you to
|
|
||||||
download models, etc), but this doesn't fix the problem.
|
|
||||||
|
|
||||||
This issue is often caused by a misconfigured configuration directive in the
|
|
||||||
`invokeai\invokeai.init` initialization file that contains startup settings. The
|
|
||||||
easiest way to fix the problem is to move the file out of the way and re-run
|
|
||||||
`configure_invokeai.py`. Enter the developer's console (option 3 of the launcher
|
|
||||||
script) and run this command:
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
configure_invokeai.py --root=.
|
|
||||||
```
|
|
||||||
|
|
||||||
Note the dot (.) after `--root`. It is part of the command.
|
|
||||||
|
|
||||||
_If none of these maneuvers fixes the problem_ then please report the problem to
|
|
||||||
the [InvokeAI Issues](https://github.com/invoke-ai/InvokeAI/issues) section, or
|
|
||||||
visit our [Discord Server](https://discord.gg/ZmtBAhwWhy) for interactive
|
|
||||||
assistance.
|
|
||||||
|
|
||||||
### other problems
|
|
||||||
|
|
||||||
If you run into problems during or after installation, the InvokeAI team is
|
|
||||||
available to help you. Either create an
|
|
||||||
[Issue](https://github.com/invoke-ai/InvokeAI/issues) at our GitHub site, or
|
|
||||||
make a request for help on the "bugs-and-support" channel of our
|
|
||||||
[Discord server](https://discord.gg/ZmtBAhwWhy). We are a 100% volunteer
|
|
||||||
organization, but typically somebody will be available to help you within 24
|
|
||||||
hours, and often much sooner.
|
|
||||||
|
|
||||||
## Updating to newer versions
|
|
||||||
|
|
||||||
This distribution is changing rapidly, and we add new features on a daily basis.
|
|
||||||
To update to the latest released version (recommended), run the `update.sh`
|
|
||||||
(Linux/Mac) or `update.bat` (Windows) scripts. This will fetch the latest
|
|
||||||
release and re-run the `configure_invokeai` script to download any updated
|
|
||||||
models files that may be needed. You can also use this to add additional models
|
|
||||||
that you did not select at installation time.
|
|
||||||
|
|
||||||
You can now close the developer console and run `invoke` as before. If you get
|
|
||||||
complaints about missing models, then you may need to do the additional step of
|
|
||||||
running `configure_invokeai.py`. This happens relatively infrequently. To do
|
|
||||||
this, simply open up the developer's console again and type
|
|
||||||
`python scripts/configure_invokeai.py`.
|
|
||||||
|
|
||||||
You may also use the `update` script to install any selected version of
|
|
||||||
InvokeAI. From https://github.com/invoke-ai/InvokeAI, navigate to the zip file
|
|
||||||
link of the version you wish to install. You can find the zip links by going to
|
|
||||||
the one of the release pages and looking for the **Assets** section at the
|
|
||||||
bottom. Alternatively, you can browse "branches" and "tags" at the top of the
|
|
||||||
big code directory on the InvokeAI welcome page. When you find the version you
|
|
||||||
want to install, go to the green "<> Code" button at the top, and copy the
|
|
||||||
"Download ZIP" link.
|
|
||||||
|
|
||||||
Now run `update.sh` (or `update.bat`) with the URL of the desired InvokeAI
|
|
||||||
version as its argument. For example, this will install the old 2.2.0 release.
|
|
||||||
|
|
||||||
```cmd
|
|
||||||
update.sh https://github.com/invoke-ai/InvokeAI/archive/refs/tags/v2.2.0.zip
|
|
||||||
```
|
|
||||||
|
|
||||||
## Installing Python in Ubuntu
|
|
||||||
|
|
||||||
For reasons that are not entirely clear, installing the correct version of
|
|
||||||
Python can be a bit of a challenge on Ubuntu, Linux Mint, and other
|
|
||||||
Ubuntu-derived distributions.
|
|
||||||
|
|
||||||
In particular, Ubuntu version 20.04 LTS comes with an old version of Python,
|
|
||||||
does not come with the PIP package manager installed, and to make matters worse,
|
|
||||||
the `python` command points to Python2, not Python3.
|
|
||||||
|
|
||||||
Here is the quick recipe for bringing your system up to date:
|
|
||||||
|
|
||||||
```
|
|
||||||
sudo apt update
|
|
||||||
sudo apt install python3.9
|
|
||||||
sudo apt install python3-pip
|
|
||||||
cd /usr/bin
|
|
||||||
sudo ln -sf python3.9 python3
|
|
||||||
sudo ln -sf python3 python
|
|
||||||
```
|
|
||||||
|
|
||||||
You can still access older versions of Python by calling `python2`, `python3.8`,
|
|
||||||
etc.
|
|
docs/installation/INSTALL_AUTOMATED.md is recreated as a one-line symbolic link:

@@ -0,0 +1 @@
+010_INSTALL_AUTOMATED.md
docs/installation/INSTALL_MANUAL.md (removed, 429 lines; likewise recreated further down as a symbolic link):

---
title: Manual Installation
---

<figure markdown>

# :fontawesome-brands-linux: Linux | :fontawesome-brands-apple: macOS | :fontawesome-brands-windows: Windows

</figure>

!!! warning "This is for advanced Users"

    who are already experienced with using conda or pip

## Introduction

You have two choices for manual installation: the [first one](#Conda_method), based on the Anaconda3 package manager (`conda`), and [a second one](#PIP_method) which uses basic Python virtual environment (`venv`) commands and the PIP package manager. Both methods require you to enter commands on the terminal, also known as the "console".

On Windows systems you are encouraged to install and use the [Powershell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3), which provides compatibility with Linux and Mac shells and nice features such as command-line completion.

### Conda method

1. Check that your system meets the [hardware requirements](index.md#Hardware_Requirements) and has the appropriate GPU drivers installed. In particular, if you are a Linux user with an AMD GPU installed, you may need to install the [ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

    InvokeAI does not yet support Windows machines with AMD GPUs due to the lack of ROCm driver support on this platform.

    To confirm that the appropriate drivers are installed, run `nvidia-smi` on NVIDIA/CUDA systems, and `rocm-smi` on AMD systems. These should return information about the installed video card.

    Macintosh users with MPS acceleration, or anybody with a CPU-only system, can skip this step.

2. You will need to install Anaconda3 and Git if they are not already available. Use your operating system's preferred package manager, or download the installers manually. You can find them here:

    - [Anaconda3](https://www.anaconda.com/)
    - [git](https://git-scm.com/downloads)

3. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from GitHub:

    ```bash
    git clone https://github.com/invoke-ai/InvokeAI.git
    ```

    This will create an InvokeAI folder where you will follow the rest of the steps.

4. Enter the newly-created InvokeAI folder:

    ```bash
    cd InvokeAI
    ```

    From this step forward make sure that you are working in the InvokeAI directory!

5. Select the appropriate environment file:

    We have created a series of environment files suited for different operating systems and GPU hardware. They are located in the `environments-and-requirements` directory:

    <figure markdown>

    | filename                 | OS                              |
    | :----------------------: | :-----------------------------: |
    | environment-lin-amd.yml  | Linux with an AMD (ROCm) GPU    |
    | environment-lin-cuda.yml | Linux with an NVIDIA CUDA GPU   |
    | environment-mac.yml      | Macintosh                       |
    | environment-win-cuda.yml | Windows with an NVIDIA CUDA GPU |

    </figure>

    Choose the appropriate environment file for your system and link or copy it to `environment.yml` in InvokeAI's top-level directory. To do so, run the following command from the repository root:

    !!! Example ""

        === "Macintosh and Linux"

            !!! todo "Replace `xxx` and `yyy` with the appropriate OS and GPU codes as seen in the table above"

            ```bash
            ln -sf environments-and-requirements/environment-xxx-yyy.yml environment.yml
            ```

            When this is done, confirm that a file `environment.yml` has been linked in the InvokeAI root directory and that it points to the correct file in `environments-and-requirements`:

            ```bash
            ls -la
            ```

        === "Windows"

            !!! todo "Since it requires admin privileges to create links, we will use the copy command to create your `environment.yml`"

            ```cmd
            copy environments-and-requirements\environment-win-cuda.yml environment.yml
            ```

            Afterwards verify that the file `environment.yml` has been created, either via the explorer or by using the command `dir` from the terminal:

            ```cmd
            dir
            ```

    !!! warning "Do not try to run conda directly on the environment file in the subdirectory. This won't work. Instead, copy or link it to the top-level directory as shown."

6. Create the conda environment:

    ```bash
    conda env update
    ```

    This will create a new environment named `invokeai` and install all InvokeAI dependencies into it. If something goes wrong you should take a look at [troubleshooting](#troubleshooting).

7. Activate the `invokeai` environment:

    In order to use the newly created environment you will first need to activate it:

    ```bash
    conda activate invokeai
    ```

    Your command-line prompt should change to indicate that `invokeai` is active by prepending `(invokeai)`.

8. Pre-load the model weights files:

    !!! tip

        If you have already downloaded the weights file(s) for another Stable Diffusion distribution, you may skip this step (by selecting "skip" when prompted) and configure InvokeAI to use the previously-downloaded files. The process for this is described [here](INSTALLING_MODELS.md).

    ```bash
    python scripts/configure_invokeai.py
    ```

    The script `configure_invokeai.py` will interactively guide you through the process of downloading and installing the weights files needed for InvokeAI. Note that the main Stable Diffusion weights file is protected by a license agreement that you have to agree to. The script will list the steps you need to take to create an account on the site that hosts the weights files, accept the agreement, and provide an access token that allows InvokeAI to legally download and install the weights files.

    If you get an error message about a module not being installed, check that the `invokeai` environment is active and if not, repeat step 7.

9. Run the command-line or the web interface:

    !!! example ""

        !!! warning "Make sure that the conda environment is activated, which should show `(invokeai)` in front of your prompt!"

        === "CLI"

            ```bash
            python scripts/invoke.py
            ```

        === "local Webserver"

            ```bash
            python scripts/invoke.py --web
            ```

        === "Public Webserver"

            ```bash
            python scripts/invoke.py --web --host 0.0.0.0
            ```

        If you choose to run the web interface, point your browser at http://localhost:9090 in order to load the GUI.

10. Render away!

    Browse the [features](../features/CLI.md) section to learn about all the things you can do with InvokeAI.

    Note that some GPUs are slow to warm up. In particular, when using an AMD card with the ROCm driver, you may have to wait for over a minute the first time you try to generate an image. Fortunately, after the warm-up period rendering will be fast.

11. Subsequently, to relaunch the script, be sure to run "conda activate invokeai", enter the `InvokeAI` directory, and then launch the invoke script. If you forget to activate the 'invokeai' environment, the script will fail with multiple `ModuleNotFound` errors. A condensed version of this daily start-up sequence is sketched below.
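    A minimal sketch of the routine described in this step; the paths assume you cloned into `InvokeAI` as in step 3:

    ```bash
    # Daily start-up: activate the conda environment, enter the repo, launch the web UI.
    conda activate invokeai
    cd InvokeAI
    python scripts/invoke.py --web
    ```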
## Updating to newer versions of the script

This distribution is changing rapidly. If you used the `git clone` method (step 3) to download the InvokeAI directory, then to update to the latest and greatest version, launch the Anaconda window, enter `InvokeAI` and type:

```bash
git pull
conda env update
python scripts/configure_invokeai.py --no-interactive #optional
```

This will bring your local copy into sync with the remote one. The last step may be needed to take advantage of new features or released models. The `--no-interactive` flag will prevent the script from prompting you to download the big Stable Diffusion weights files.

## pip Install

To install InvokeAI with only the PIP package manager, please follow these steps:

1. Make sure you are using Python 3.9 or higher. The rest of the install procedure depends on this:

    ```bash
    python -V
    ```

2. Install the `virtualenv` tool if you don't have it already:

    ```bash
    pip install virtualenv
    ```

3. From within the InvokeAI top-level directory, create and activate a virtual environment named `invokeai`:

    ```bash
    virtualenv invokeai
    source invokeai/bin/activate
    ```

4. Pick the correct `requirements*.txt` file for your hardware and operating system.

    We have created a series of requirements files suited for different operating systems and GPU hardware. They are located in the `environments-and-requirements` directory:

    <figure markdown>

    | filename                            | OS                                                               |
    | :---------------------------------: | :--------------------------------------------------------------: |
    | requirements-lin-amd.txt            | Linux with an AMD (ROCm) GPU                                     |
    | requirements-lin-arm64.txt          | Linux running on arm64 systems                                   |
    | requirements-lin-cuda.txt           | Linux with an NVIDIA (CUDA) GPU                                  |
    | requirements-mac-mps-cpu.txt        | Macintoshes with MPS acceleration                                |
    | requirements-lin-win-colab-cuda.txt | Windows with an NVIDIA (CUDA) GPU<br>(supports Google Colab too) |

    </figure>

    Select the appropriate requirements file, and make a link to it from `requirements.txt` in the top-level InvokeAI directory. The command to do this from the top-level directory is:

    !!! example ""

        === "Macintosh and Linux"

            !!! info "Replace `xxx` and `yyy` with the appropriate OS and GPU codes."

            ```bash
            ln -sf environments-and-requirements/requirements-xxx-yyy.txt requirements.txt
            ```

        === "Windows"

            !!! info "On Windows, admin privileges are required to make links, so we use the copy command instead"

            ```cmd
            copy environments-and-requirements\requirements-lin-win-colab-cuda.txt requirements.txt
            ```

    !!! warning

        Please do not link or copy `environments-and-requirements/requirements-base.txt`. This is a base requirements file that does not have the platform-specific libraries. Also, be sure to link or copy the platform-specific file to a top-level file named `requirements.txt` as shown here. Running pip on a requirements file in a subdirectory will not work as expected.

    When this is done, confirm that a file named `requirements.txt` has been created in the InvokeAI root directory and that it points to the correct file in `environments-and-requirements`.

5. Run PIP

    Be sure that the `invokeai` environment is active before doing this:

    ```bash
    pip install --prefer-binary -r requirements.txt
    ```

---

## Troubleshooting

Here are some common issues and their suggested solutions.

### Conda

#### Conda fails before completing `conda update`

The usual source of these errors is a package incompatibility. While we have tried to minimize these, over time packages get updated and sometimes introduce incompatibilities.

We suggest that you search [Issues](https://github.com/invoke-ai/InvokeAI/issues) or the "bugs-and-support" channel of the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).

You may also try to install the broken packages manually using PIP. To do this, activate the `invokeai` environment, and run `pip install` with the name and version of the package that is causing the incompatibility. For example:

```bash
pip install test-tube==0.7.5
```

You can keep doing this until all requirements are satisfied and the `invoke.py` script runs without errors. Please report to [Issues](https://github.com/invoke-ai/InvokeAI/issues) what you were able to do to work around the problem so that others can benefit from your investigation.

#### Create Conda Environment fails on MacOS

If creating the conda environment fails with an lmdb error, this is most likely caused by Clang. Run `brew config` to see which Clang is installed on your Mac. If Clang isn't installed, that's causing the error. Start by installing additional Xcode command line tools, followed by `brew install llvm`:

```bash
xcode-select --install
brew install llvm
```

If `brew config` shows Clang installed, update to the latest llvm and try creating the environment again.

#### `configure_invokeai.py` or `invoke.py` crashes at an early stage

This is usually due to an incomplete or corrupted Conda install. Make sure you have linked to the correct environment file and run `conda update` again.

If the problem persists, a more extreme measure is to clear Conda's caches and remove the `invokeai` environment:

```bash
conda deactivate
conda env remove -n invokeai
conda clean -a
conda update
```

This removes all cached library files, including ones that may have been corrupted somehow. (This is not supposed to happen, but does anyway).

#### `invoke.py` crashes at a later stage

If the CLI or web site had been working ok, but something unexpected happens later on during the session, you've encountered a code bug that is probably unrelated to an install issue. Please search [Issues](https://github.com/invoke-ai/InvokeAI/issues), file a bug report, or ask for help on [Discord](https://discord.gg/ZmtBAhwWhy).

#### My renders are running very slowly

You may have installed the wrong torch (machine learning) package, and the system is running on CPU rather than the GPU. To check, look at the log messages that appear when `invoke.py` is first starting up. One of the earlier lines should say `Using device type cuda`. On AMD systems, it will also say "cuda", and on Macintoshes, it should say "mps". If instead the message says it is running on "cpu", then you may need to install the correct torch library. A quick command-line check is sketched below.
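A minimal sketch, assuming torch is already installed in the active environment; the attributes queried are standard PyTorch APIs, not anything InvokeAI-specific:

```bash
# Report which accelerator PyTorch can see. CUDA/ROCm builds report "cuda: True"
# on a working GPU; recent builds on Apple Silicon report "mps: True".
python -c "import torch; print('cuda:', torch.cuda.is_available()); print('mps:', getattr(torch.backends, 'mps', None) is not None and torch.backends.mps.is_available())"
```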
You may be able to fix this by installing a different torch library. Here are the magic incantations for Conda and PIP.

!!! todo "For CUDA systems"

    - conda

    ```bash
    conda install pytorch torchvision torchaudio pytorch-cuda=11.6 -c pytorch -c nvidia
    ```

    - pip

    ```bash
    pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
    ```

!!! todo "For AMD systems"

    - conda

    ```bash
    conda activate invokeai
    pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
    ```

    - pip

    ```bash
    pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
    ```

More information and troubleshooting tips can be found at https://pytorch.org.
docs/installation/INSTALL_MANUAL.md is recreated as a one-line symbolic link:

@@ -0,0 +1 @@
+020_INSTALL_MANUAL.md
The remaining documentation hunks only rewrite cross-references to the renumbered installation pages:

@@ -10,7 +10,7 @@ InvokeAI is released, you will download and reinstall the new version.
 If you wish to tinker with unreleased versions of InvokeAI that introduce
 potentially unstable new features, you should consider using the
 [source installer](INSTALL_SOURCE.md) or one of the
-[manual install](INSTALL_MANUAL.md) methods.
+[manual install](../020_INSTALL_MANUAL.md) methods.

 **Important Caveats**
 - This script does not support AMD GPUs. For Linux AMD support,

@@ -12,7 +12,7 @@ of Linux, MacOS or Windows. It will leave you with a version that runs a stable
 version of InvokeAI with the option to upgrade to experimental versions later.

 Before you begin, make sure that you meet the
-[hardware requirements](index.md#Hardware_Requirements) and has the appropriate
+[hardware requirements](../../index.md#hardware-requirements) and has the appropriate
 GPU drivers installed. In particular, if you are a Linux user with an AMD GPU
 installed, you may need to install the
 [ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).

@@ -100,7 +100,7 @@ off the process.
 If you have already downloaded the weights file(s) for another Stable
 Diffusion distribution, you may skip this step (by selecting "skip" when
 prompted) and configure InvokeAI to use the previously-downloaded files. The
-process for this is described in [Installing Models](INSTALLING_MODELS.md).
+process for this is described in [Installing Models](../050_INSTALLING_MODELS.md).

 8. The script will now exit and you'll be ready to generate some images. The
    invokeAI directory will contain numerous files. Look for a shell script

@@ -128,7 +128,7 @@ python scripts/invoke.py --web --max_load_models=3 \
 ```

 These options are described in detail in the
-[Command-Line Interface](../features/CLI.md) documentation.
+[Command-Line Interface](../../features/CLI.md) documentation.

 ## Troubleshooting

@@ -5,14 +5,14 @@ title: Overview
 We offer several ways to install InvokeAI, each one suited to your
 experience and preferences.

-1. [Automated Installer](INSTALL_AUTOMATED.md)
+1. [Automated Installer](010_INSTALL_AUTOMATED.md)

    This is a script that will install all of InvokeAI's essential
    third party libraries and InvokeAI itself. It includes access to a
    "developer console" which will help us debug problems with you and
    give you to access experimental features.

-2. [Manual Installation](INSTALL_MANUAL.md)
+2. [Manual Installation](020_INSTALL_MANUAL.md)

    In this method you will manually run the commands needed to install
    InvokeAI and its dependencies. We offer two recipes: one suited to

@@ -25,10 +25,9 @@ experience and preferences.
    the cutting edge of future InvokeAI development and is willing to put
    up with occasional glitches and breakage.

-3. [Docker Installation](INSTALL_DOCKER.md)
+3. [Docker Installation](040_INSTALL_DOCKER.md)

    We also offer a method for creating Docker containers containing
    InvokeAI and its dependencies. This method is recommended for
    individuals with experience with Docker containers and understand
    the pluses and minuses of a container-based install.
Across the conda environment files and the base requirements file, the commit makes two mechanical dependency changes: the `dependency_injector==4.40.0` pin is dropped, and PyPatchMatch is bumped from 0.1.4 to 0.1.5.

@@ -30,7 +30,6 @@ dependencies:
 - torchvision
 - transformers~=4.25
 - pip:
-- dependency_injector==4.40.0
 - getpass_asterisk
 - omegaconf==2.1.1
 - picklescan

@@ -42,5 +41,5 @@ dependencies:
 - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
 - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
 - git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
-- git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
+- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
 - -e .

@@ -10,7 +10,6 @@ dependencies:
 - pip:
 - --extra-index-url https://download.pytorch.org/whl/rocm5.2/
 - albumentations==0.4.3
-- dependency_injector==4.40.0
 - diffusers~=0.10
 - einops==0.3.0
 - eventlet

@@ -44,5 +43,5 @@ dependencies:
 - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
 - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
 - git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
-- git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
+- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
 - -e .

@@ -14,7 +14,6 @@ dependencies:
 - pip:
 - accelerate~=0.13
 - albumentations==0.4.3
-- dependency_injector==4.40.0
 - diffusers~=0.10
 - einops==0.3.0
 - eventlet

@@ -44,5 +43,5 @@ dependencies:
 - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
 - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
 - git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
-- git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
+- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
 - -e .

@@ -59,7 +59,7 @@ dependencies:
 - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k-diffusion
 - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
 - git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.2#egg=gfpgan
-- git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
+- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
 - -e .
 variables:
 PYTORCH_ENABLE_MPS_FALLBACK: 1

@@ -13,7 +13,6 @@ dependencies:
 - cudatoolkit=11.6
 - pip:
 - albumentations==0.4.3
-- dependency_injector==4.40.0
 - diffusers~=0.10
 - einops==0.3.0
 - eventlet

@@ -43,5 +42,5 @@ dependencies:
 - git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion
 - git+https://github.com/invoke-ai/clipseg.git@relaxed-python-requirement#egg=clipseg
 - git+https://github.com/invoke-ai/GFPGAN@basicsr-1.4.1#egg=gfpgan
-- git+https://github.com/invoke-ai/PyPatchMatch@0.1.4#egg=pypatchmatch
+- git+https://github.com/invoke-ai/PyPatchMatch@0.1.5#egg=pypatchmatch
 - -e .

@@ -1,6 +1,5 @@
 # pip will resolve the version which matches torch
 albumentations
-dependency_injector==4.40.0
 diffusers[torch]~=0.10
 einops
 eventlet

@@ -35,6 +34,6 @@ torch-fidelity
 torchmetrics
 transformers~=4.25
 https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip#egg=k-diffusion
-https://github.com/invoke-ai/PyPatchMatch/archive/refs/tags/0.1.4.zip#egg=pypatchmatch
+https://github.com/invoke-ai/PyPatchMatch/archive/refs/tags/0.1.5.zip#egg=pypatchmatch
 https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip#egg=clip
 https://github.com/invoke-ai/clipseg/archive/relaxed-python-requirement.zip#egg=clipseg
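Because these are pure dependency refreshes, an existing install should only need its environment re-synced rather than a full reinstall. A minimal sketch using the same commands the installation docs above rely on; pick the variant that matches how you installed:

```bash
# Conda-based install: re-sync the environment after pulling this commit.
git pull
conda env update

# PIP/virtualenv-based install: reinstall from the linked requirements file.
pip install --prefer-binary -r requirements.txt
```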
The ESLint configuration gains an explicit unused-vars rule (and a reformatted `extends` array), and the HTML entry page drops two stray blank lines:

@@ -1,6 +1,13 @@
 module.exports = {
-  extends: ['eslint:recommended', 'plugin:@typescript-eslint/recommended', 'plugin:react-hooks/recommended'],
+  extends: [
+    'eslint:recommended',
+    'plugin:@typescript-eslint/recommended',
+    'plugin:react-hooks/recommended',
+  ],
   parser: '@typescript-eslint/parser',
   plugins: ['@typescript-eslint', 'eslint-plugin-react-hooks'],
   root: true,
+  rules: {
+    '@typescript-eslint/no-unused-vars': ['warn', { varsIgnorePattern: '_+' }],
+  },
 };

@@ -1,6 +1,5 @@
 <!DOCTYPE html>
 <html lang="en">
-
 <head>
   <meta charset="UTF-8" />
   <meta name="viewport" content="width=device-width, initial-scale=1.0" />

@@ -12,5 +11,4 @@
   <div id="root"></div>
   <script type="module" src="/src/main.tsx"></script>
 </body>
-
 </html>
The frontend's package.json adds lint and format tooling (madge, prettier) alongside new script entries and a madge configuration block:

@@ -8,6 +8,10 @@
     "build": "tsc && vite build",
     "build-dev": "tsc && vite build -m development",
     "preview": "vite preview",
+    "madge": "madge --circular src/main.tsx",
+    "lint": "eslint src/",
+    "prettier": "prettier '*.{json,cjs,ts,html}' 'src/**/*.{ts,tsx}'",
+    "fmt": "npm run prettier -- --write",
     "postinstall": "patch-package"
   },
   "dependencies": {

@@ -58,8 +62,10 @@
     "eslint": "^8.23.0",
     "eslint-plugin-prettier": "^4.2.1",
     "eslint-plugin-react-hooks": "^4.6.0",
+    "madge": "^5.0.1",
     "patch-package": "^6.5.0",
     "postinstall-postinstall": "^2.1.0",
+    "prettier": "^2.8.1",
     "sass": "^1.55.0",
     "terser": "^5.16.1",
     "tsc-watch": "^5.0.3",

@@ -67,5 +73,15 @@
     "vite": "^3.0.7",
     "vite-plugin-eslint": "^1.8.1",
     "vite-tsconfig-paths": "^3.5.2"
+  },
+  "madge": {
+    "detectiveOptions": {
+      "ts": {
+        "skipTypeImports": true
+      },
+      "tsx": {
+        "skipTypeImports": true
+      }
+    }
   }
 }
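A rough sketch of how the newly added scripts are meant to be used locally, assuming you are inside the frontend directory with its dependencies already installed; each `yarn run` call simply invokes the script entries added above:

```bash
yarn run madge   # circular-dependency check: madge --circular src/main.tsx
yarn run lint    # eslint src/
yarn run fmt     # prettier --write over *.{json,cjs,ts,html} and src/**/*.{ts,tsx}
```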
@ -5,15 +5,15 @@ const Loading = () => {
|
|||||||
<Flex
|
<Flex
|
||||||
width={'100vw'}
|
width={'100vw'}
|
||||||
height={'100vh'}
|
height={'100vh'}
|
||||||
alignItems='center'
|
alignItems="center"
|
||||||
justifyContent='center'
|
justifyContent="center"
|
||||||
>
|
>
|
||||||
<Spinner
|
<Spinner
|
||||||
thickness='2px'
|
thickness="2px"
|
||||||
speed='1s'
|
speed="1s"
|
||||||
emptyColor='gray.200'
|
emptyColor="gray.200"
|
||||||
color='gray.400'
|
color="gray.400"
|
||||||
size='xl'
|
size="xl"
|
||||||
/>
|
/>
|
||||||
</Flex>
|
</Flex>
|
||||||
);
|
);
|
||||||
|
3
frontend/src/app/invokeai.d.ts
vendored
3
frontend/src/app/invokeai.d.ts
vendored
@ -12,8 +12,7 @@
|
|||||||
* 'gfpgan'.
|
* 'gfpgan'.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { Category as GalleryCategory } from 'features/gallery/store/gallerySlice';
|
import { InvokeTabName } from 'features/tabs/tabMap';
|
||||||
import { InvokeTabName } from 'features/tabs/components/InvokeTabs';
|
|
||||||
import { IRect } from 'konva/lib/types';
|
import { IRect } from 'konva/lib/types';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
import { createAction } from '@reduxjs/toolkit';
|
import { createAction } from '@reduxjs/toolkit';
|
||||||
import { GalleryCategory } from 'features/gallery/store/gallerySlice';
|
import { GalleryCategory } from 'features/gallery/store/gallerySlice';
|
||||||
import { InvokeTabName } from 'features/tabs/components/InvokeTabs';
|
import { InvokeTabName } from 'features/tabs/tabMap';
|
||||||
import * as InvokeAI from 'app/invokeai';
|
import * as InvokeAI from 'app/invokeai';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -17,16 +17,16 @@ import {
|
|||||||
modelChangeRequested,
|
modelChangeRequested,
|
||||||
setIsProcessing,
|
setIsProcessing,
|
||||||
} from 'features/system/store/systemSlice';
|
} from 'features/system/store/systemSlice';
|
||||||
import { InvokeTabName } from 'features/tabs/components/InvokeTabs';
|
import { InvokeTabName } from 'features/tabs/tabMap';
|
||||||
import * as InvokeAI from 'app/invokeai';
|
import * as InvokeAI from 'app/invokeai';
|
||||||
import { RootState } from 'app/store';
|
import type { RootState } from 'app/store';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Returns an object containing all functions which use `socketio.emit()`.
|
* Returns an object containing all functions which use `socketio.emit()`.
|
||||||
* i.e. those which make server requests.
|
* i.e. those which make server requests.
|
||||||
*/
|
*/
|
||||||
const makeSocketIOEmitters = (
|
const makeSocketIOEmitters = (
|
||||||
store: MiddlewareAPI<Dispatch<AnyAction>, any>,
|
store: MiddlewareAPI<Dispatch<AnyAction>, RootState>,
|
||||||
socketio: Socket
|
socketio: Socket
|
||||||
) => {
|
) => {
|
||||||
// We need to dispatch actions to redux and get pieces of state from the store.
|
// We need to dispatch actions to redux and get pieces of state from the store.
|
||||||
@ -114,7 +114,7 @@ const makeSocketIOEmitters = (
|
|||||||
const options: OptionsState = getState().options;
|
const options: OptionsState = getState().options;
|
||||||
const { facetoolType, facetoolStrength, codeformerFidelity } = options;
|
const { facetoolType, facetoolStrength, codeformerFidelity } = options;
|
||||||
|
|
||||||
const facetoolParameters: Record<string, any> = {
|
const facetoolParameters: Record<string, unknown> = {
|
||||||
facetool_strength: facetoolStrength,
|
facetool_strength: facetoolStrength,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -39,14 +39,15 @@ import {
|
|||||||
requestSystemConfig,
|
requestSystemConfig,
|
||||||
} from './actions';
|
} from './actions';
|
||||||
import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
|
import { addImageToStagingArea } from 'features/canvas/store/canvasSlice';
|
||||||
import { tabMap } from 'features/tabs/components/InvokeTabs';
|
import { tabMap } from 'features/tabs/tabMap';
|
||||||
|
import type { RootState } from 'app/store';
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Returns an object containing listener callbacks for socketio events.
|
* Returns an object containing listener callbacks for socketio events.
|
||||||
* TODO: This file is large, but simple. Should it be split up further?
|
* TODO: This file is large, but simple. Should it be split up further?
|
||||||
*/
|
*/
|
||||||
const makeSocketIOListeners = (
|
const makeSocketIOListeners = (
|
||||||
store: MiddlewareAPI<Dispatch<AnyAction>, any>
|
store: MiddlewareAPI<Dispatch<AnyAction>, RootState>
|
||||||
) => {
|
) => {
|
||||||
const { dispatch, getState } = store;
|
const { dispatch, getState } = store;
|
||||||
|
|
||||||
@ -100,7 +101,7 @@ const makeSocketIOListeners = (
|
|||||||
*/
|
*/
|
||||||
onGenerationResult: (data: InvokeAI.ImageResultResponse) => {
|
onGenerationResult: (data: InvokeAI.ImageResultResponse) => {
|
||||||
try {
|
try {
|
||||||
const state = getState();
|
const state: RootState = getState();
|
||||||
const { shouldLoopback, activeTab } = state.options;
|
const { shouldLoopback, activeTab } = state.options;
|
||||||
const { boundingBox: _, generationMode, ...rest } = data;
|
const { boundingBox: _, generationMode, ...rest } = data;
|
||||||
|
|
||||||
@ -325,7 +326,10 @@ const makeSocketIOListeners = (
|
|||||||
// remove references to image in options
|
// remove references to image in options
|
||||||
const { initialImage, maskPath } = getState().options;
|
const { initialImage, maskPath } = getState().options;
|
||||||
|
|
||||||
if (initialImage?.url === url || initialImage === url) {
|
if (
|
||||||
|
initialImage === url ||
|
||||||
|
(initialImage as InvokeAI.Image)?.url === url
|
||||||
|
) {
|
||||||
dispatch(clearInitialImage());
|
dispatch(clearInitialImage());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,6 +1,4 @@
|
|||||||
import { combineReducers, configureStore } from '@reduxjs/toolkit';
|
import { combineReducers, configureStore } from '@reduxjs/toolkit';
|
||||||
import { useDispatch, useSelector } from 'react-redux';
|
|
||||||
import type { TypedUseSelectorHook } from 'react-redux';
|
|
||||||
|
|
||||||
import { persistReducer } from 'redux-persist';
|
import { persistReducer } from 'redux-persist';
|
||||||
import storage from 'redux-persist/lib/storage'; // defaults to localStorage for web
|
import storage from 'redux-persist/lib/storage'; // defaults to localStorage for web
|
||||||
@ -101,7 +99,3 @@ export const store = configureStore({
|
|||||||
export type AppGetState = typeof store.getState;
|
export type AppGetState = typeof store.getState;
|
||||||
export type RootState = ReturnType<typeof store.getState>;
|
export type RootState = ReturnType<typeof store.getState>;
|
||||||
export type AppDispatch = typeof store.dispatch;
|
export type AppDispatch = typeof store.dispatch;
|
||||||
|
|
||||||
// Use throughout your app instead of plain `useDispatch` and `useSelector`
|
|
||||||
export const useAppDispatch: () => AppDispatch = useDispatch;
|
|
||||||
export const useAppSelector: TypedUseSelectorHook<RootState> = useSelector;
|
|
||||||
|
6
frontend/src/app/storeHooks.ts
Normal file
6
frontend/src/app/storeHooks.ts
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
import { TypedUseSelectorHook, useDispatch, useSelector } from 'react-redux';
|
||||||
|
import { AppDispatch, RootState } from './store';
|
||||||
|
|
||||||
|
// Use throughout your app instead of plain `useDispatch` and `useSelector`
|
||||||
|
export const useAppDispatch: () => AppDispatch = useDispatch;
|
||||||
|
export const useAppSelector: TypedUseSelectorHook<RootState> = useSelector;
|
@@ -6,7 +6,7 @@ import {
   Box,
 } from '@chakra-ui/react';
 import { SystemState } from 'features/system/store/systemSlice';
-import { useAppSelector } from 'app/store';
+import { useAppSelector } from 'app/storeHooks';
 import { RootState } from 'app/store';
 import { createSelector } from '@reduxjs/toolkit';
 import { ReactElement } from 'react';
@@ -124,8 +124,8 @@ export default function IAISlider(props: IAIFullSliderProps) {
     onChange(clamped);
   };
 
-  const handleInputChange = (v: any) => {
-    setLocalInputValue(v);
+  const handleInputChange = (v: number | string) => {
+    setLocalInputValue(String(v));
     onChange(Number(v));
   };
 
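In the slider hunk above, the input handler now accepts `number | string` rather than `any`: numeric inputs commonly report their value in both forms, so the handler keeps the string form for local display and forwards a number. A standalone sketch of that normalization (names are illustrative):

// Illustrative normalization mirroring handleInputChange: keep the raw text
// for the controlled input, emit a number to the parent.
const normalizeSliderInput = (v: number | string) => ({
  localValue: String(v),
  numericValue: Number(v),
});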
@@ -5,7 +5,7 @@ import {
   useEffect,
   KeyboardEvent,
 } from 'react';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import { FileRejection, useDropzone } from 'react-dropzone';
 import { useToast } from '@chakra-ui/react';
 import { ImageUploaderTriggerContext } from 'app/contexts/ImageUploaderTriggerContext';
@@ -1,7 +1,6 @@
 import { Tooltip } from '@chakra-ui/react';
 import * as Slider from '@radix-ui/react-slider';
 import React from 'react';
-import IAITooltip from './IAITooltip';
 
 type IAISliderProps = Slider.SliderProps & {
   value: number[];
@@ -20,10 +20,15 @@ const IAITooltip = (props: IAITooltipProps) => {
       <Tooltip.Portal>
         <Tooltip.Content
           {...contentProps}
-          onPointerDownOutside={(e: any) => {e.preventDefault()}}
+          onPointerDownOutside={(e) => {
+            e.preventDefault();
+          }}
           className="invokeai__tooltip-content"
         >
-          <Tooltip.Arrow {...arrowProps} className="invokeai__tooltip-arrow" />
+          <Tooltip.Arrow
+            {...arrowProps}
+            className="invokeai__tooltip-arrow"
+          />
           {children}
         </Tooltip.Content>
       </Tooltip.Portal>
@@ -1,5 +1,4 @@
-import { RefObject, useEffect, useRef } from 'react';
-import { Rect } from 'react-konva';
+import { RefObject, useEffect } from 'react';
 
 const watchers: {
   ref: RefObject<HTMLElement>;
@@ -1,4 +1,4 @@
-import { createIcon } from "@chakra-ui/react";
+import { createIcon } from '@chakra-ui/react';
 
 const ImageToImageIcon = createIcon({
   displayName: 'ImageToImageIcon',
@@ -1,4 +1,4 @@
-import { createIcon } from "@chakra-ui/react";
+import { createIcon } from '@chakra-ui/react';
 
 const NodesIcon = createIcon({
   displayName: 'NodesIcon',
@@ -1,4 +1,4 @@
-import { createIcon } from "@chakra-ui/react";
+import { createIcon } from '@chakra-ui/react';
 
 const OutpaintIcon = createIcon({
   displayName: 'OutpaintIcon',
@@ -1,4 +1,4 @@
-import { createIcon } from "@chakra-ui/react";
+import { createIcon } from '@chakra-ui/react';
 
 const TextToImageIcon = createIcon({
   displayName: 'TextToImageIcon',
@@ -1,10 +1,12 @@
 import { NUMPY_RAND_MAX, NUMPY_RAND_MIN } from 'app/constants';
 import { OptionsState } from 'features/options/store/optionsSlice';
 import { SystemState } from 'features/system/store/systemSlice';
+import { Vector2d } from 'konva/lib/types';
+import { Dimensions } from 'features/canvas/store/canvasTypes';
 
 import { stringToSeedWeightsArray } from './seedWeightPairs';
 import randomInt from './randomInt';
-import { InvokeTabName } from 'features/tabs/components/InvokeTabs';
+import { InvokeTabName } from 'features/tabs/tabMap';
 import {
   CanvasState,
   isCanvasMaskLine,
@@ -12,6 +14,10 @@ import {
 import generateMask from 'features/canvas/util/generateMask';
 import openBase64ImageInTab from './openBase64ImageInTab';
 import { getCanvasBaseLayer } from 'features/canvas/util/konvaInstanceProvider';
+import type {
+  UpscalingLevel,
+  FacetoolType,
+} from 'features/options/store/optionsSlice';
 
 export type FrontendToBackendParametersConfig = {
   generationMode: InvokeTabName;
@@ -21,13 +27,68 @@ export type FrontendToBackendParametersConfig = {
   imageToProcessUrl?: string;
 };
 
+export type BackendGenerationParameters = {
+  prompt: string;
+  iterations: number;
+  steps: number;
+  cfg_scale: number;
+  threshold: number;
+  perlin: number;
+  height: number;
+  width: number;
+  sampler_name: string;
+  seed: number;
+  progress_images: boolean;
+  progress_latents: boolean;
+  save_intermediates: number;
+  generation_mode: InvokeTabName;
+  init_mask: string;
+  init_img?: string;
+  fit?: boolean;
+  seam_size?: number;
+  seam_blur?: number;
+  seam_strength?: number;
+  seam_steps?: number;
+  tile_size?: number;
+  infill_method?: string;
+  force_outpaint?: boolean;
+  seamless?: boolean;
+  hires_fix?: boolean;
+  strength?: number;
+  invert_mask?: boolean;
+  inpaint_replace?: number;
+  bounding_box?: Vector2d & Dimensions;
+  inpaint_width?: number;
+  inpaint_height?: number;
+  with_variations?: Array<Array<number>>;
+  variation_amount?: number;
+  enable_image_debugging?: boolean;
+};
+
+export type BackendEsrGanParameters = {
+  level: UpscalingLevel;
+  strength: number;
+};
+
+export type BackendFacetoolParameters = {
+  type: FacetoolType;
+  strength: number;
+  codeformer_fidelity?: number;
+};
+
+export type BackendParameters = {
+  generationParameters: BackendGenerationParameters;
+  esrganParameters: false | BackendEsrGanParameters;
+  facetoolParameters: false | BackendFacetoolParameters;
+};
+
 /**
  * Translates/formats frontend state into parameters suitable
  * for consumption by the API.
  */
 export const frontendToBackendParameters = (
   config: FrontendToBackendParametersConfig
-): { [key: string]: any } => {
+): BackendParameters => {
   const canvasBaseLayer = getCanvasBaseLayer();
 
   const { generationMode, optionsState, canvasState, systemState } = config;
@@ -73,7 +134,7 @@ export const frontendToBackendParameters = (
     enableImageDebugging,
   } = systemState;
 
-  const generationParameters: { [k: string]: any } = {
+  const generationParameters: BackendGenerationParameters = {
     prompt,
     iterations,
     steps,
@@ -91,8 +152,8 @@ export const frontendToBackendParameters = (
     init_mask: '',
   };
 
-  let esrganParameters: false | { [k: string]: any } = false;
-  let facetoolParameters: false | { [k: string]: any } = false;
+  let esrganParameters: false | BackendEsrGanParameters = false;
+  let facetoolParameters: false | BackendFacetoolParameters = false;
 
   generationParameters.seed = shouldRandomizeSeed
     ? randomInt(NUMPY_RAND_MIN, NUMPY_RAND_MAX)
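Because frontendToBackendParameters now returns BackendParameters instead of an untyped dictionary, call sites can destructure the result with full type checking; the esrgan and facetool members are `false | …` unions, so a truthiness check narrows them. A hedged sketch of a call site (the import path and the surrounding `config` value are assumptions, not shown in this diff):

import { frontendToBackendParameters } from 'common/util/parameterTranslation';

// `config` is assumed to be a FrontendToBackendParametersConfig built elsewhere.
const { generationParameters, esrganParameters, facetoolParameters } =
  frontendToBackendParameters(config);

if (esrganParameters) {
  // Narrowed to BackendEsrGanParameters inside this block.
  console.log(esrganParameters.level, esrganParameters.strength);
}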
@@ -1,4 +1,4 @@
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import IAIAlertDialog from 'common/components/IAIAlertDialog';
 import IAIButton from 'common/components/IAIButton';
 import { clearCanvasHistory } from 'features/canvas/store/canvasSlice';
@@ -1,7 +1,7 @@
 import { useCallback, useRef } from 'react';
 import Konva from 'konva';
 import { Layer, Stage } from 'react-konva';
-import { useAppSelector } from 'app/store';
+import { useAppSelector } from 'app/storeHooks';
 import {
   canvasSelector,
   isStagingSelector,
@@ -1,5 +1,5 @@
 import { createSelector } from '@reduxjs/toolkit';
-import { useAppSelector } from 'app/store';
+import { useAppSelector } from 'app/storeHooks';
 import _ from 'lodash';
 import { Group, Rect } from 'react-konva';
 import { canvasSelector } from '../store/canvasSelectors';
@@ -2,7 +2,7 @@
 
 import { useColorMode } from '@chakra-ui/react';
 import { createSelector } from '@reduxjs/toolkit';
-import { useAppSelector } from 'app/store';
+import { useAppSelector } from 'app/storeHooks';
 import _ from 'lodash';
 import { ReactNode, useCallback, useLayoutEffect, useState } from 'react';
 import { Group, Line as KonvaLine } from 'react-konva';
@@ -1,5 +1,6 @@
 import { createSelector } from '@reduxjs/toolkit';
-import { RootState, useAppSelector } from 'app/store';
+import { RootState } from 'app/store';
+import { useAppSelector } from 'app/storeHooks';
 import { GalleryState } from 'features/gallery/store/gallerySlice';
 import { ImageConfig } from 'konva/lib/shapes/Image';
 import _ from 'lodash';
@@ -1,5 +1,5 @@
 import { createSelector } from '@reduxjs/toolkit';
-import { useAppSelector } from 'app/store';
+import { useAppSelector } from 'app/storeHooks';
 import { RectConfig } from 'konva/lib/shapes/Rect';
 import { Rect } from 'react-konva';
 import { canvasSelector } from 'features/canvas/store/canvasSelectors';
@@ -1,6 +1,6 @@
 import { GroupConfig } from 'konva/lib/Group';
 import { Group, Line } from 'react-konva';
-import { useAppSelector } from 'app/store';
+import { useAppSelector } from 'app/storeHooks';
 import { createSelector } from '@reduxjs/toolkit';
 import { canvasSelector } from 'features/canvas/store/canvasSelectors';
 import { isCanvasMaskLine } from '../store/canvasTypes';
@@ -1,5 +1,5 @@
 import { createSelector } from '@reduxjs/toolkit';
-import { useAppSelector } from 'app/store';
+import { useAppSelector } from 'app/storeHooks';
 import _ from 'lodash';
 import { Group, Line, Rect } from 'react-konva';
 import {
@@ -1,6 +1,6 @@
 import { Spinner } from '@chakra-ui/react';
 import { useLayoutEffect, useRef } from 'react';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import { activeTabNameSelector } from 'features/options/store/optionsSelectors';
 import {
   resizeAndScaleCanvas,
@@ -1,5 +1,5 @@
 import { createSelector } from '@reduxjs/toolkit';
-import { useAppSelector } from 'app/store';
+import { useAppSelector } from 'app/storeHooks';
 import { GroupConfig } from 'konva/lib/Group';
 import _ from 'lodash';
 import { Group, Rect } from 'react-konva';
@@ -1,6 +1,6 @@
 import { ButtonGroup, Flex } from '@chakra-ui/react';
 import { createSelector } from '@reduxjs/toolkit';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import IAIIconButton from 'common/components/IAIIconButton';
 import _ from 'lodash';
 import { useCallback } from 'react';
@@ -1,5 +1,5 @@
 import { createSelector } from '@reduxjs/toolkit';
-import { useAppSelector } from 'app/store';
+import { useAppSelector } from 'app/storeHooks';
 import _ from 'lodash';
 import { canvasSelector } from 'features/canvas/store/canvasSelectors';
 import IAICanvasStatusTextCursorPos from './IAICanvasStatusText/IAICanvasStatusTextCursorPos';
@@ -1,5 +1,5 @@
 import { createSelector } from '@reduxjs/toolkit';
-import { useAppSelector } from 'app/store';
+import { useAppSelector } from 'app/storeHooks';
 import { canvasSelector } from 'features/canvas/store/canvasSelectors';
 import React from 'react';
 import _ from 'lodash';
@@ -2,7 +2,7 @@ import { createSelector } from '@reduxjs/toolkit';
 import { GroupConfig } from 'konva/lib/Group';
 import _ from 'lodash';
 import { Circle, Group } from 'react-konva';
-import { useAppSelector } from 'app/store';
+import { useAppSelector } from 'app/storeHooks';
 import { canvasSelector } from 'features/canvas/store/canvasSelectors';
 import { rgbaColorToString } from 'features/canvas/util/colorToString';
 import {
@@ -5,7 +5,7 @@ import { Vector2d } from 'konva/lib/types';
 import _ from 'lodash';
 import { useCallback, useEffect, useRef, useState } from 'react';
 import { Group, Rect, Transformer } from 'react-konva';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import {
   roundDownToMultiple,
   roundToMultiple,
@@ -7,7 +7,7 @@ import {
   setMaskColor,
   setShouldPreserveMaskedArea,
 } from 'features/canvas/store/canvasSlice';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import _ from 'lodash';
 import IAIIconButton from 'common/components/IAIIconButton';
 import { FaMask, FaTrash } from 'react-icons/fa';
@@ -1,7 +1,7 @@
 import { createSelector } from '@reduxjs/toolkit';
 import { useHotkeys } from 'react-hotkeys-hook';
 import { FaRedo } from 'react-icons/fa';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import IAIIconButton from 'common/components/IAIIconButton';
 import { activeTabNameSelector } from 'features/options/store/optionsSelectors';
 import { canvasSelector } from 'features/canvas/store/canvasSelectors';
@@ -10,7 +10,7 @@ import {
   setShouldShowIntermediates,
   setShouldSnapToGrid,
 } from 'features/canvas/store/canvasSlice';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import _ from 'lodash';
 import IAIIconButton from 'common/components/IAIIconButton';
 import { FaWrench } from 'react-icons/fa';
@@ -7,7 +7,7 @@ import {
   setBrushSize,
   setTool,
 } from 'features/canvas/store/canvasSlice';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import _ from 'lodash';
 import IAIIconButton from 'common/components/IAIIconButton';
 import {
@@ -8,7 +8,7 @@ import {
   setLayer,
   setTool,
 } from 'features/canvas/store/canvasSlice';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import _ from 'lodash';
 import IAIIconButton from 'common/components/IAIIconButton';
 import {
@@ -1,7 +1,7 @@
 import { createSelector } from '@reduxjs/toolkit';
 import { useHotkeys } from 'react-hotkeys-hook';
 import { FaUndo } from 'react-icons/fa';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import IAIIconButton from 'common/components/IAIIconButton';
 import { canvasSelector } from 'features/canvas/store/canvasSelectors';
 
@@ -1,5 +1,5 @@
 import { createSelector } from '@reduxjs/toolkit';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import { KonvaEventObject } from 'konva/lib/Node';
 import _ from 'lodash';
 import { useCallback } from 'react';
@@ -10,7 +10,7 @@ import {
   setShouldSnapToGrid,
   setTool,
 } from 'features/canvas/store/canvasSlice';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import { useRef } from 'react';
 import {
   canvasSelector,
@@ -1,5 +1,5 @@
 import { createSelector } from '@reduxjs/toolkit';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import { activeTabNameSelector } from 'features/options/store/optionsSelectors';
 import Konva from 'konva';
 import { KonvaEventObject } from 'konva/lib/Node';
@@ -1,5 +1,5 @@
 import { createSelector } from '@reduxjs/toolkit';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import { activeTabNameSelector } from 'features/options/store/optionsSelectors';
 import Konva from 'konva';
 import { Vector2d } from 'konva/lib/types';
@@ -1,4 +1,4 @@
-import { useAppDispatch } from 'app/store';
+import { useAppDispatch } from 'app/storeHooks';
 import { useCallback } from 'react';
 import { mouseLeftCanvas } from 'features/canvas/store/canvasSlice';
 
@@ -1,10 +1,13 @@
 import { createSelector } from '@reduxjs/toolkit';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import { activeTabNameSelector } from 'features/options/store/optionsSelectors';
 import Konva from 'konva';
 import _ from 'lodash';
 import { MutableRefObject, useCallback } from 'react';
-import { canvasSelector, isStagingSelector } from 'features/canvas/store/canvasSelectors';
+import {
+  canvasSelector,
+  isStagingSelector,
+} from 'features/canvas/store/canvasSelectors';
 import {
   // addPointToCurrentEraserLine,
   addPointToCurrentLine,
@@ -1,5 +1,5 @@
 import { createSelector } from '@reduxjs/toolkit';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import Konva from 'konva';
 import { KonvaEventObject } from 'konva/lib/Node';
 import _ from 'lodash';
@@ -1,4 +1,4 @@
-import { useAppDispatch } from 'app/store';
+import { useAppDispatch } from 'app/storeHooks';
 import Konva from 'konva';
 import _ from 'lodash';
 import {
@@ -1,7 +1,7 @@
 import { createSelector } from '@reduxjs/toolkit';
 import { isEqual } from 'lodash';
 
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import { RootState } from 'app/store';
 import {
   OptionsState,
@@ -1,4 +1,5 @@
-import { RootState, useAppSelector } from 'app/store';
+import { RootState } from 'app/store';
+import { useAppSelector } from 'app/storeHooks';
 import CurrentImageButtons from './CurrentImageButtons';
 import { MdPhoto } from 'react-icons/md';
 import CurrentImagePreview from './CurrentImagePreview';
@@ -1,7 +1,8 @@
 import { IconButton, Image } from '@chakra-ui/react';
 import { useState } from 'react';
 import { FaAngleLeft, FaAngleRight } from 'react-icons/fa';
-import { RootState, useAppDispatch, useAppSelector } from 'app/store';
+import { RootState } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import {
   GalleryCategory,
   GalleryState,
@@ -14,15 +14,9 @@ import {
   Flex,
 } from '@chakra-ui/react';
 import { createSelector } from '@reduxjs/toolkit';
-import {
-  ChangeEvent,
-  cloneElement,
-  forwardRef,
-  ReactElement,
-  SyntheticEvent,
-  useRef,
-} from 'react';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { ChangeEvent, ReactElement, SyntheticEvent } from 'react';
+import { cloneElement, forwardRef, useRef } from 'react';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import { deleteImage } from 'app/socketio/actions';
 import { RootState } from 'app/store';
 import {
@@ -6,7 +6,7 @@ import {
   Tooltip,
   useToast,
 } from '@chakra-ui/react';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import {
   setCurrentImage,
   setShouldHoldGalleryOpen,
@@ -12,7 +12,7 @@ import { useHotkeys } from 'react-hotkeys-hook';
 import { MdPhotoLibrary } from 'react-icons/md';
 import { BsPinAngle, BsPinAngleFill } from 'react-icons/bs';
 import { requestImages } from 'app/socketio/actions';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import IAIIconButton from 'common/components/IAIIconButton';
 import {
   selectNextImage,
@@ -41,7 +41,7 @@ import IAICheckbox from 'common/components/IAICheckbox';
 import { setDoesCanvasNeedScaling } from 'features/canvas/store/canvasSlice';
 import _ from 'lodash';
 import IAIButton from 'common/components/IAIButton';
-import { InvokeTabName } from 'features/tabs/components/InvokeTabs';
+import { InvokeTabName } from 'features/tabs/tabMap';
 
 const GALLERY_SHOW_BUTTONS_MIN_WIDTH = 320;
 const GALLERY_IMAGE_WIDTH_OFFSET = 40;
@@ -55,6 +55,7 @@ const GALLERY_TAB_WIDTHS: Record<
   unifiedCanvas: { galleryMinWidth: 200, galleryMaxWidth: 200 },
   nodes: { galleryMinWidth: 200, galleryMaxWidth: 500 },
   postprocess: { galleryMinWidth: 200, galleryMaxWidth: 500 },
+  training: { galleryMinWidth: 200, galleryMaxWidth: 500 },
 };
 
 const LIGHTBOX_GALLERY_WIDTH = 400;
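The added `training` entry keeps GALLERY_TAB_WIDTHS complete: the constant is declared as a Record (per the hunk context above), so if it is keyed by every InvokeTabName — an assumption, since the key type falls outside this hunk — omitting a tab's width entry would fail type checking. A sketch of that shape:

import { InvokeTabName } from 'features/tabs/tabMap';

// Illustrative type only; the real declaration lives in the gallery component.
type GalleryTabWidths = Record<
  InvokeTabName,
  { galleryMinWidth: number; galleryMaxWidth: number }
>;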
@@ -10,7 +10,7 @@ import {
 import { ExternalLinkIcon } from '@chakra-ui/icons';
 import { memo } from 'react';
 import { IoArrowUndoCircleOutline } from 'react-icons/io5';
-import { useAppDispatch } from 'app/store';
+import { useAppDispatch } from 'app/storeHooks';
 import * as InvokeAI from 'app/invokeai';
 import {
   setCfgScale,
@@ -1,5 +1,5 @@
 import { createSelector } from '@reduxjs/toolkit';
-import { useAppSelector } from 'app/store';
+import { useAppSelector } from 'app/storeHooks';
 import { gallerySelector } from '../store/gallerySliceSelectors';
 
 const selector = createSelector(gallerySelector, (gallery) => ({
@@ -3,7 +3,7 @@ import type { PayloadAction } from '@reduxjs/toolkit';
 import _, { clamp } from 'lodash';
 import * as InvokeAI from 'app/invokeai';
 import { IRect } from 'konva/lib/types';
-import { InvokeTabName } from 'features/tabs/components/InvokeTabs';
+import { InvokeTabName } from 'features/tabs/tabMap';
 
 export type GalleryCategory = 'user' | 'result';
 
@@ -38,7 +38,7 @@ export const uploadImage =
   });
 
   const image = (await response.json()) as InvokeAI.ImageUploadResponse;
-  console.log(image)
+  console.log(image);
   const newImage: InvokeAI.Image = {
     uuid: uuidv4(),
     category: 'user',
@@ -1,5 +1,6 @@
 import { IconButton } from '@chakra-ui/react';
-import { RootState, useAppDispatch, useAppSelector } from 'app/store';
+import { RootState } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import IAIIconButton from 'common/components/IAIIconButton';
 import CurrentImageButtons from 'features/gallery/components/CurrentImageButtons';
 import { imagesSelector } from 'features/gallery/components/CurrentImagePreview';
@@ -14,7 +14,7 @@ type ReactPanZoomProps = {
   image: string;
   styleClass?: string;
   alt?: string;
-  ref?: any;
+  ref?: React.Ref<HTMLImageElement>;
 };
 
 export default function ReactPanZoom({
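Typing `ref` as React.Ref<HTMLImageElement> instead of `any` means the prop only accepts refs meant for an <img> element. A minimal sketch of a component that would satisfy such a prop, using forwardRef (the component name is hypothetical):

import React, { forwardRef } from 'react';

// Hypothetical image component whose forwarded ref matches
// React.Ref<HTMLImageElement>.
const PanZoomImage = forwardRef<HTMLImageElement, { src: string; alt?: string }>(
  ({ src, alt }, ref) => <img ref={ref} src={src} alt={alt} />
);
PanZoomImage.displayName = 'PanZoomImage';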
@@ -1,6 +1,6 @@
 import { Box, Flex } from '@chakra-ui/react';
 import { createSelector } from '@reduxjs/toolkit';
-import { useAppDispatch, useAppSelector } from 'app/store';
+import { useAppDispatch, useAppSelector } from 'app/storeHooks';
 import IAISlider from 'common/components/IAISlider';
 import { canvasSelector } from 'features/canvas/store/canvasSelectors';
 import { setBoundingBoxDimensions } from 'features/canvas/store/canvasSlice';
@@ -9,7 +9,8 @@ import _ from 'lodash';
 const selector = createSelector(
   canvasSelector,
   (canvas) => {
-    const { boundingBoxDimensions, boundingBoxScaleMethod: boundingBoxScale } = canvas;
+    const { boundingBoxDimensions, boundingBoxScaleMethod: boundingBoxScale } =
+      canvas;
     return {
       boundingBoxDimensions,
       boundingBoxScale,
Some files were not shown because too many files have changed in this diff.