Merge branch 'main' into bugfix/use-cu117-wheel

Lincoln Stein 2023-02-03 18:15:54 -05:00 committed by GitHub
commit 2e3cd03b27
18 changed files with 365 additions and 807 deletions


@@ -1,18 +1,20 @@
# use this file as a whitelist
*
!assets/caution.png
!backend
!frontend/dist
!invokeai
!ldm
!pyproject.toml
!README.md
!scripts
# Guard against pulling in any models that might exist in the directory tree
**.pt*
**/*.pt*
**/*.ckpt
# unignore configs, but only ignore the custom models.yaml, in case it exists
!configs
configs/models.yaml
configs/models.yaml.orig
# whitelist frontend, but ignore node_modules
invokeai/frontend/node_modules
# ignore python cache
**/__pycache__
**/*.py[cod]
**/*.egg-info
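Since the rules above form a whitelist (ignore everything with `*`, then selectively un-ignore with `!`), it is worth verifying what actually lands in the build context. A minimal sketch of one way to check, assuming BuildKit and an arbitrary `busybox` base image (neither is prescribed by this repo):

```bash
# Build a throwaway image whose only job is to list the build context.
# Run from the repository root so the .dockerignore above applies.
docker build --progress=plain -f - . <<'EOF'
FROM busybox
COPY . /ctx
RUN find /ctx -maxdepth 2 | sort
EOF
```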

.github/CODEOWNERS

@@ -4,4 +4,4 @@ scripts/legacy_api.py @CapableWeb
tests/legacy_tests.sh @CapableWeb
installer/ @ebr
.github/workflows/ @mauwii
docker_build/ @mauwii
docker/ @mauwii


@@ -1,88 +0,0 @@
name: Build and push cloud image
on:
workflow_dispatch:
# push:
# branches:
# - main
# tags:
# - v*
# # we will NOT push the image on pull requests, only test buildability.
# pull_request:
# branches:
# - main
permissions:
contents: read
packages: write
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}
jobs:
docker:
if: github.event.pull_request.draft == false
strategy:
fail-fast: false
matrix:
arch:
- x86_64
# requires resolving a patchmatch issue
# - aarch64
runs-on: ubuntu-latest
name: ${{ matrix.arch }}
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
if: matrix.arch == 'aarch64'
- name: Docker meta
id: meta
uses: docker/metadata-action@v4
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
# see https://github.com/docker/metadata-action
# will push the following tags:
# :edge
# :main (+ any other branches enabled in the workflow)
# :<tag>
# :1.2.3 (for semver tags)
# :1.2 (for semver tags)
# :<sha>
tags: |
type=edge,branch=main
type=ref,event=branch
type=ref,event=tag
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=sha
# suffix image tags with architecture
flavor: |
latest=auto
suffix=-${{ matrix.arch }},latest=true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
# do not login to container registry on PRs
- if: github.event_name != 'pull_request'
name: Docker login
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push cloud image
uses: docker/build-push-action@v3
with:
context: .
file: docker-build/Dockerfile.cloud
platforms: Linux/${{ matrix.arch }}
# do not push the image on PRs
push: false
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}


@@ -15,14 +15,19 @@ jobs:
flavor:
- amd
- cuda
- cpu
include:
- flavor: amd
pip-extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
dockerfile: docker-build/Dockerfile
dockerfile: docker/Dockerfile
platforms: linux/amd64,linux/arm64
- flavor: cuda
pip-extra-index-url: ''
dockerfile: docker-build/Dockerfile
dockerfile: docker/Dockerfile
platforms: linux/amd64,linux/arm64
- flavor: cpu
pip-extra-index-url: 'https://download.pytorch.org/whl/cpu'
dockerfile: docker/Dockerfile
platforms: linux/amd64,linux/arm64
runs-on: ubuntu-latest
name: ${{ matrix.flavor }}
@@ -34,7 +39,8 @@ jobs:
id: meta
uses: docker/metadata-action@v4
with:
images: ghcr.io/${{ github.repository }}-${{ matrix.flavor }}
github-token: ${{ secrets.GITHUB_TOKEN }}
images: ghcr.io/${{ github.repository }}
tags: |
type=ref,event=branch
type=ref,event=tag
@@ -43,7 +49,8 @@ jobs:
type=semver,pattern={{major}}
type=sha
flavor: |
latest=true
latest=${{ matrix.flavor == 'cuda' && github.ref == 'refs/heads/main' }}
suffix=${{ matrix.flavor }},onlatest=false
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
@@ -69,5 +76,15 @@ jobs:
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: PIP_EXTRA_INDEX_URL=${{ matrix.pip-extra-index-url }}
# cache-from: type=gha
# cache-to: type=gha,mode=max
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Output image, digest and metadata to summary
run: |
{
echo imageid: "${{ steps.docker_build.outputs.imageid }}"
echo digest: "${{ steps.docker_build.outputs.digest }}"
echo labels: "${{ steps.meta.outputs.labels }}"
echo tags: "${{ steps.meta.outputs.tags }}"
echo version: "${{ steps.meta.outputs.version }}"
} >> "$GITHUB_STEP_SUMMARY"
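One practical consequence of the `latest=...` flavor expression above: only the CUDA image built from `main` receives the bare `latest` tag, while every flavor remains reachable through its suffixed tags. A hedged sketch of pulling the published result (assuming the repository name lowercases to `invoke-ai/invokeai`):

```bash
# Per the flavor expression above, :latest resolves to the CUDA image
# built from the main branch; other flavors keep their suffixed tags.
docker pull ghcr.io/invoke-ai/invokeai:latest
```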


@@ -1,86 +0,0 @@
#######################
#### Builder stage ####
FROM library/ubuntu:22.04 AS builder
ARG DEBIAN_FRONTEND=noninteractive
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt update && apt-get install -y \
git \
libglib2.0-0 \
libgl1-mesa-glx \
python3-venv \
python3-pip \
build-essential \
python3-opencv \
libopencv-dev
# This is needed for patchmatch support
RUN cd /usr/lib/x86_64-linux-gnu/pkgconfig/ &&\
ln -sf opencv4.pc opencv.pc
ARG WORKDIR=/invokeai
WORKDIR ${WORKDIR}
ENV VIRTUAL_ENV=${WORKDIR}/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
RUN --mount=type=cache,target=/root/.cache/pip \
python3 -m venv ${VIRTUAL_ENV} &&\
pip install --extra-index-url https://download.pytorch.org/whl/cu116 \
torch==1.12.0+cu116 \
torchvision==0.13.0+cu116 &&\
pip install -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch
COPY . .
RUN --mount=type=cache,target=/root/.cache/pip \
cp environments-and-requirements/requirements-lin-cuda.txt requirements.txt && \
pip install -r requirements.txt &&\
pip install -e .
#######################
#### Runtime stage ####
FROM library/ubuntu:22.04 as runtime
ARG DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
apt update && apt install -y --no-install-recommends \
git \
curl \
ncdu \
iotop \
bzip2 \
libglib2.0-0 \
libgl1-mesa-glx \
python3-venv \
python3-pip \
build-essential \
python3-opencv \
libopencv-dev &&\
apt-get clean && apt-get autoclean
ARG WORKDIR=/invokeai
WORKDIR ${WORKDIR}
ENV INVOKEAI_ROOT=/mnt/invokeai
ENV VIRTUAL_ENV=${WORKDIR}/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
COPY --from=builder ${WORKDIR} ${WORKDIR}
COPY --from=builder /usr/lib/x86_64-linux-gnu/pkgconfig /usr/lib/x86_64-linux-gnu/pkgconfig
# build patchmatch
RUN python -c "from patchmatch import patch_match"
## workaround for non-existent initfile when runtime directory is mounted; see #1613
RUN touch /root/.invokeai
ENTRYPOINT ["bash"]
CMD ["-c", "python3 scripts/invoke.py --web --host 0.0.0.0"]


@@ -1,44 +0,0 @@
# Directory in the container where the INVOKEAI_ROOT (runtime dir) will be mounted
INVOKEAI_ROOT=/mnt/invokeai
# Host directory to contain the runtime dir. Will be mounted at INVOKEAI_ROOT path in the container
HOST_MOUNT_PATH=${HOME}/invokeai
IMAGE=local/invokeai:latest
USER=$(shell id -u)
GROUP=$(shell id -g)
# All downloaded models, config, etc will end up in ${HOST_MOUNT_PATH} on the host.
# This is consistent with the expected non-Docker behaviour.
# Contents can be moved to a persistent storage and used to prime the cache on another host.
build:
DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..
configure:
docker run --rm -it --runtime=nvidia --gpus=all \
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
${IMAGE} -c "python scripts/configure_invokeai.py"
# Run the container with the runtime dir mounted and the web server exposed on port 9090
web:
docker run --rm -it --runtime=nvidia --gpus=all \
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
-p 9090:9090 \
${IMAGE} -c "python scripts/invoke.py --web --host 0.0.0.0"
# Run the cli with the runtime dir mounted
cli:
docker run --rm -it --runtime=nvidia --gpus=all \
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
${IMAGE} -c "python scripts/invoke.py"
# Run the container with the runtime dir mounted and open a bash shell
shell:
docker run --rm -it --runtime=nvidia --gpus=all \
-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} ${IMAGE} --
.PHONY: build configure web cli shell


@@ -1,10 +0,0 @@
#!/usr/bin/env bash
# Variables shared by build.sh and run.sh
REPOSITORY_NAME=${REPOSITORY_NAME:-$(basename "$(git rev-parse --show-toplevel)")}
VOLUMENAME=${VOLUMENAME:-${REPOSITORY_NAME,,}_data}
ARCH=${ARCH:-$(uname -m)}
PLATFORM=${PLATFORM:-Linux/${ARCH}}
CONTAINER_FLAVOR=${CONTAINER_FLAVOR:-cuda}
INVOKEAI_BRANCH=$(git branch --show)
INVOKEAI_TAG=${REPOSITORY_NAME,,}-${CONTAINER_FLAVOR}:${INVOKEAI_TAG:-${INVOKEAI_BRANCH##*/}}


@@ -1,8 +1,12 @@
# syntax=docker/dockerfile:1
FROM python:3.9-slim AS python-base
# use bash
SHELL [ "/bin/bash", "-c" ]
# Maintained by Matthias Wild <mauwii@outlook.de>
ARG PYTHON_VERSION=3.9
##################
### base image ###
##################
FROM python:${PYTHON_VERSION}-slim AS python-base
# Install necessary packages
RUN \
@@ -17,12 +21,39 @@ RUN \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
ARG APPDIR=/usr/src/app
ENV APPDIR ${APPDIR}
# set working directory and path
ARG APPDIR=/usr/src
ARG APPNAME=InvokeAI
WORKDIR ${APPDIR}
ENV PATH=${APPDIR}/${APPNAME}/bin:$PATH
FROM python-base AS builder
######################
### build frontend ###
######################
FROM node:lts as frontend-builder
# Copy Sources
ARG APPDIR=/usr/src
WORKDIR ${APPDIR}
COPY --link . .
# install dependencies and build frontend
WORKDIR ${APPDIR}/invokeai/frontend
RUN \
--mount=type=cache,target=/usr/local/share/.cache/yarn/v6 \
yarn install \
--prefer-offline \
--frozen-lockfile \
--non-interactive \
--production=false \
&& yarn build
###################################
### install python dependencies ###
###################################
FROM python-base AS pyproject-builder
# Install dependencies
RUN \
--mount=type=cache,target=/var/cache/apt,sharing=locked \
--mount=type=cache,target=/var/lib/apt,sharing=locked \
@@ -34,25 +65,28 @@ RUN \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# create virtual environment
RUN python3 -m venv "${APPNAME}" \
--upgrade-deps
# copy sources
COPY --link . .
COPY --from=frontend-builder ${APPDIR} .
# install pyproject.toml
ARG PIP_EXTRA_INDEX_URL
ENV PIP_EXTRA_INDEX_URL ${PIP_EXTRA_INDEX_URL}
RUN --mount=type=cache,target=/root/.cache/pip,sharing=locked \
"${APPDIR}/${APPNAME}/bin/pip" install \
--use-pep517 \
.
# install requirements
RUN python3 -m venv invokeai \
&& ${APPDIR}/invokeai/bin/pip \
install \
--no-cache-dir \
--use-pep517 \
.
#####################
### runtime image ###
#####################
FROM python-base AS runtime
# setup environment
COPY --link . .
COPY --from=builder ${APPDIR}/invokeai ${APPDIR}/invokeai
ENV PATH=${APPDIR}/invokeai/bin:$PATH
COPY --from=pyproject-builder ${APPDIR}/${APPNAME} ${APPDIR}/${APPNAME}
ENV INVOKEAI_ROOT=/data
ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"
@@ -73,6 +107,6 @@ RUN \
&& rm -rf /var/lib/apt/lists/*
# set Entrypoint and default CMD
ENTRYPOINT [ "invoke" ]
ENTRYPOINT [ "invokeai" ]
CMD [ "--web", "--host=0.0.0.0" ]
VOLUME [ "/data" ]
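For a manual build of this Dockerfile outside CI, the same `PIP_EXTRA_INDEX_URL` build-arg used by the workflow and by `build.sh` can be passed directly. A sketch, using the CPU index url listed in `build.sh`'s header comments (the tag name is a placeholder):

```bash
# CPU-only build from the repository root; the build-arg selects
# which PyTorch wheel index pip uses inside the image
docker build \
  --build-arg PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu" \
  --file docker/Dockerfile \
  --tag invokeai:cpu-test \
  .
```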


@@ -2,30 +2,31 @@
set -e
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#setup
#
# Some possible pip extra-index urls (cuda 11.7 is available without extra url):
#
# CUDA 11.6: https://download.pytorch.org/whl/cu116
# ROCm 5.2: https://download.pytorch.org/whl/rocm5.2
# CPU: https://download.pytorch.org/whl/cpu
#
# as found on https://pytorch.org/get-started/locally/
cd "$(dirname "$0")" || exit 1
SCRIPTDIR=$(dirname "$0")
cd "$SCRIPTDIR" || exit 1
source ./env.sh
DOCKERFILE=${INVOKE_DOCKERFILE:-"./Dockerfile"}
DOCKERFILE=${INVOKE_DOCKERFILE:-Dockerfile}
# print the settings
echo -e "You are using these values:\n"
echo -e "Dockerfile:\t ${DOCKERFILE}"
echo -e "extra-index-url: ${PIP_EXTRA_INDEX_URL:-none}"
echo -e "Volumename:\t ${VOLUMENAME}"
echo -e "arch:\t\t ${ARCH}"
echo -e "Platform:\t ${PLATFORM}"
echo -e "Invokeai_tag:\t ${INVOKEAI_TAG}\n"
echo -e "Dockerfile: \t${DOCKERFILE}"
echo -e "index-url: \t${PIP_EXTRA_INDEX_URL:-none}"
echo -e "Volumename: \t${VOLUMENAME}"
echo -e "Platform: \t${PLATFORM}"
echo -e "Registry: \t${CONTAINER_REGISTRY}"
echo -e "Repository: \t${CONTAINER_REPOSITORY}"
echo -e "Container Tag: \t${CONTAINER_TAG}"
echo -e "Container Image: ${CONTAINER_IMAGE}\n"
# Create docker volume
if [[ -n "$(docker volume ls -f name="${VOLUMENAME}" -q)" ]]; then
echo -e "Volume already exists\n"
else
@@ -36,7 +37,7 @@ fi
# Build Container
docker build \
--platform="${PLATFORM}" \
--tag="${INVOKEAI_TAG}" \
${PIP_EXTRA_INDEX_URL:+--build-arg=PIP_EXTRA_INDEX_URL="${PIP_EXTRA_INDEX_URL}"} \
--tag="${CONTAINER_IMAGE}" \
${PIP_EXTRA_INDEX_URL:+--build-arg="PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}"} \
--file="${DOCKERFILE}" \
..

docker/env.sh

@@ -0,0 +1,35 @@
#!/usr/bin/env bash
if [[ -z "$PIP_EXTRA_INDEX_URL" ]]; then
# Decide which container flavor to build if not specified
if [[ -z "$CONTAINER_FLAVOR" ]]; then
# Check for CUDA and ROCm
CUDA_AVAILABLE=$(python -c "import torch;print(torch.cuda.is_available())")
ROCM_AVAILABLE=$(python -c "import torch;print(torch.version.hip is not None)")
if [[ "$(uname -s)" != "Darwin" && "${CUDA_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR=cuda
elif [[ "$(uname -s)" != "Darwin" && "${ROCM_AVAILABLE}" == "True" ]]; then
CONTAINER_FLAVOR="rocm"
else
CONTAINER_FLAVOR="cpu"
fi
fi
# Set PIP_EXTRA_INDEX_URL based on container flavor
if [[ "$CONTAINER_FLAVOR" == "rocm" ]]; then
PIP_EXTRA_INDEX_URL="${PIP_EXTRA_INDEX_URL-"https://download.pytorch.org/whl/rocm"}"
elif CONTAINER_FLAVOR=cpu; then
PIP_EXTRA_INDEX_URL="${PIP_EXTRA_INDEX_URL-"https://download.pytorch.org/whl/cpu"}"
fi
fi
# Variables shared by build.sh and run.sh
REPOSITORY_NAME="${REPOSITORY_NAME-$(basename "$(git rev-parse --show-toplevel)")}"
VOLUMENAME="${VOLUMENAME-"${REPOSITORY_NAME,,}_data"}"
ARCH="${ARCH-$(uname -m)}"
PLATFORM="${PLATFORM-Linux/${ARCH}}"
INVOKEAI_BRANCH="${INVOKEAI_BRANCH-$(git branch --show)}"
CONTAINER_REGISTRY="${CONTAINER_REGISTRY-"ghcr.io"}"
CONTAINER_REPOSITORY="${CONTAINER_REPOSITORY-"$(whoami)/${REPOSITORY_NAME}"}"
CONTAINER_TAG="${CONTAINER_TAG-"${INVOKEAI_BRANCH##*/}-${CONTAINER_FLAVOR}"}"
CONTAINER_IMAGE="${CONTAINER_REGISTRY}/${CONTAINER_REPOSITORY}:${CONTAINER_TAG}"
CONTAINER_IMAGE="${CONTAINER_IMAGE,,}"
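Because every assignment above uses the `${VAR-default}` form, each value can be overridden from the calling environment. A usage sketch, assuming `build.sh` sources this file as shown elsewhere in this commit:

```bash
# Force a ROCm image regardless of what the torch auto-detection decides
CONTAINER_FLAVOR=rocm ./build.sh

# Retarget the image name (myuser is a placeholder)
CONTAINER_REGISTRY=docker.io CONTAINER_REPOSITORY=myuser/invokeai ./build.sh
```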


@@ -4,27 +4,28 @@ set -e
# How to use: https://invoke-ai.github.io/InvokeAI/installation/INSTALL_DOCKER/#run-the-container
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
cd "$(dirname "$0")" || exit 1
SCRIPTDIR=$(dirname "$0")
cd "$SCRIPTDIR" || exit 1
source ./env.sh
echo -e "You are using these values:\n"
echo -e "Volumename:\t${VOLUMENAME}"
echo -e "Invokeai_tag:\t${INVOKEAI_TAG}"
echo -e "Invokeai_tag:\t${CONTAINER_IMAGE}"
echo -e "local Models:\t${MODELSPATH:-unset}\n"
docker run \
--interactive \
--tty \
--rm \
--platform="$PLATFORM" \
--platform="${PLATFORM}" \
--name="${REPOSITORY_NAME,,}" \
--hostname="${REPOSITORY_NAME,,}" \
--mount=source="$VOLUMENAME",target=/data \
--mount=source="${VOLUMENAME}",target=/data \
${MODELSPATH:+-u "$(id -u):$(id -g)"} \
${MODELSPATH:+--mount=type=bind,source=${MODELSPATH},target=/data/models} \
${HUGGING_FACE_HUB_TOKEN:+--env=HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}} \
${MODELSPATH:+--mount="type=bind,source=${MODELSPATH},target=/data/models"} \
${HUGGING_FACE_HUB_TOKEN:+--env="HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}"} \
--publish=9090:9090 \
--cap-add=sys_nice \
${GPU_FLAGS:+--gpus=${GPU_FLAGS}} \
"$INVOKEAI_TAG" ${1:+$@}
${GPU_FLAGS:+--gpus="${GPU_FLAGS}"} \
"${CONTAINER_IMAGE}" ${1:+$@}


@@ -14,22 +14,15 @@ title: Installing Manually
## Introduction
You have two choices for manual installation. The [first one](#pip-Install) uses
the basic Python virtual environment (`venv`) command and the `pip` package manager. The
[second one](#Conda-method) uses the Anaconda3 package manager (`conda`). Both
methods require you to enter commands on the terminal, also known as the
"console".
Note that the `conda` installation method is deprecated and will no longer
be supported at some point in the future.
!!! tip As of InvokeAI v2.3.0, installation using the `conda` package manager
is no longer supported. It will likely still work, but we are not testing
this installation method.
On Windows systems, you are encouraged to install and use
[PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
which provides compatibility with Linux and Mac shells and nice features such as
command-line completion.
## pip Install
To install InvokeAI with virtual environments and the PIP package manager,
please follow these steps:
@@ -50,44 +43,64 @@ please follow these steps:
This will create an InvokeAI folder where you will follow the rest of the
steps.
3. From within the InvokeAI top-level directory, create and activate a virtual
environment named `.venv`, with the prompt displaying `InvokeAI`:
```bash
python -m venv .venv \
--prompt InvokeAI \
--upgrade-deps
source .venv/bin/activate
```
4. Make sure that pip is installed in your virtual environment and up to date:
```bash
python -m ensurepip \
--upgrade
python -m pip install \
--upgrade pip
```
5. Install Package
```bash
pip install --use-pep517 .
```
6. Set up the runtime directory
In this step you will initialize a runtime directory that will contain the
models, model config files, directory for textual inversion embeddings, and
your outputs. This keeps the runtime directory separate from the source code
and aids in updating.
You may pick any location for this directory using the `--root_dir` option
(abbreviated --root). If you don't pass this option, it will default to
`~/invokeai`.
3. Create a directory to contain your InvokeAI installation (known as the "runtime"
or "root" directory). This is where your models, configs, and outputs will live
by default. Please keep in mind the disk space requirements - you will need at
least 18GB (as of this writing) for the models and the virtual environment.
From now on we will refer to this directory as `INVOKEAI_ROOT`. This keeps the
runtime directory separate from the source code and aids in updating.
```bash
invokeai-configure --root_dir ~/Programs/invokeai
export INVOKEAI_ROOT="$HOME/invokeai"
mkdir -p "${INVOKEAI_ROOT}"
```
4. From within the InvokeAI top-level directory, create and activate a virtual
environment named `.venv`, with the prompt displaying `invokeai`:
```bash
python -m venv ${INVOKEAI_ROOT}/.venv \
--prompt invokeai \
--upgrade-deps \
--copies
source ${INVOKEAI_ROOT}/.venv/bin/activate
```
!!! warning
You **may** create your virtual environment anywhere on the filesystem.
But if you choose a location that is *not* inside the `$INVOKEAI_ROOT` directory,
then you must set the `INVOKEAI_ROOT` environment variable in your shell environment,
for example by editing `~/.bashrc` or `~/.zshrc`, or by setting the Windows environment
variable. Refer to your operating system / shell documentation for the correct way of doing so.
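For example, on Linux or macOS the variable can be persisted like this (a sketch; adjust the path to wherever your runtime directory actually lives):
```bash
# Append the setting to your shell profile and reload it
echo 'export INVOKEAI_ROOT="$HOME/invokeai"' >> ~/.bashrc
source ~/.bashrc
```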
5. Make sure that pip is installed in your virtual environment and up to date:
```bash
python -m pip install --upgrade pip
```
6. Install Package
```bash
pip install --use-pep517 .
```
Deactivate and reactivate your virtual environment so that the invokeai-specific
commands become available in the environment:
```bash
deactivate && source ${INVOKEAI_ROOT}/.venv/bin/activate
```
7. Set up the runtime directory
In this step you will initialize your runtime directory with the downloaded
models, model config files, a directory for textual inversion embeddings, and
your outputs.
```bash
invokeai-configure --root ${INVOKEAI_ROOT}
```
The script `invokeai-configure` will interactively guide you through the
@@ -101,11 +114,6 @@ please follow these steps:
If you get an error message about a module not being installed, check that
the `invokeai` environment is active and if not, repeat step 5.
Note that `invokeai-configure` and `invokeai` should be installed under your
virtual environment directory and the system should find them on the PATH.
If this isn't working on your system, you can call the scripts directly
using `python scripts/configure_invokeai.py` and `python scripts/invoke.py`.
!!! tip
If you have already downloaded the weights file(s) for another Stable
@@ -127,19 +135,19 @@ please follow these steps:
=== "CLI"
```bash
invoke.py --root ~/Programs/invokeai
invokeai --root ~/invokeai
```
=== "local Webserver"
```bash
invoke.py --web --root ~/Programs/invokeai
invokeai --web --root ~/invokeai
```
=== "Public Webserver"
```bash
invoke.py --web --host 0.0.0.0 --root ~/Programs/invokeai
invokeai --web --host 0.0.0.0 --root ~/invokeai
```
If you choose to run the web interface, point your browser at
@@ -147,7 +155,8 @@ please follow these steps:
!!! tip
You can permanently set the location of the runtime directory by setting the environment variable INVOKEAI_ROOT to the path of the directory.
You can permanently set the location of the runtime directory by setting the environment variable `INVOKEAI_ROOT` to the path of the directory. As mentioned previously, this is
**required** if your virtual environment is located outside of your runtime directory.
8. Render away!
@@ -163,38 +172,6 @@ please follow these steps:
then launch the `invokeai` command. If you forget to activate the virtual
environment you will most likely receive a `command not found` error.
!!! tip
Do not move the source code repository after installation. The virtual environment directory has absolute paths in it that get confused if the directory is moved.
## Creating an "install" version of InvokeAI
If you wish you can install InvokeAI and all its dependencies in the runtime
directory. This allows you to delete the source code repository and eliminates
the need to provide `--root_dir` at startup time. Note that this method only
works with the PIP method.
1. Follow the instructions for the PIP install, but in step #2 put the virtual
environment into the runtime directory. For example, assuming the runtime
directory lives in `~/Programs/invokeai`, you'd run:
```bash
python -m venv ~/Programs/invokeai
```
2. Now follow steps 3 to 5 in the PIP recipe, ending with the `pip install`
step.
3. Run one additional step while you are in the source code repository directory
```
pip install --use-pep517 . # note the dot in the end!!!
```
4. That's all! Now, whenever you activate the virtual environment, `invokeai`
will know where to look for the runtime directory without needing a
`--root_dir` argument. In addition, you can now move or delete the source
code repository entirely.
(Don't move the runtime directory!)
!!! warning
Do not move the runtime directory after installation. The virtual environment has absolute paths in it that get confused if the directory is moved.


@@ -1 +0,0 @@
010_INSTALL_AUTOMATED.md


@@ -1,374 +0,0 @@
---
title: Manual Installation
---
<figure markdown>
# :fontawesome-brands-linux: Linux | :fontawesome-brands-apple: macOS | :fontawesome-brands-windows: Windows
</figure>
!!! warning "This is for advanced Users"
who are already experienced with using conda or pip
## Introduction
You have two choices for manual installation, the [first one](#Conda_method)
based on the Anaconda3 package manager (`conda`), and
[a second one](#PIP_method) which uses basic Python virtual environment (`venv`)
commands and the PIP package manager. Both methods require you to enter commands
on the terminal, also known as the "console".
On Windows systems you are encouraged to install and use the
[Powershell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.3),
which provides compatibility with Linux and Mac shells and nice features such as
command-line completion.
### Conda method
1. Check that your system meets the
[hardware requirements](index.md#Hardware_Requirements) and has the
appropriate GPU drivers installed. In particular, if you are a Linux user
with an AMD GPU installed, you may need to install the
[ROCm driver](https://rocmdocs.amd.com/en/latest/Installation_Guide/Installation-Guide.html).
InvokeAI does not yet support Windows machines with AMD GPUs due to the lack
of ROCm driver support on this platform.
To confirm that the appropriate drivers are installed, run `nvidia-smi` on
NVIDIA/CUDA systems, and `rocm-smi` on AMD systems. These should return
information about the installed video card.
Macintosh users with MPS acceleration, or anybody with a CPU-only system,
can skip this step.
2. You will need to install Anaconda3 and Git if they are not already
available. Use your operating system's preferred package manager, or
download the installers manually. You can find them here:
- [Anaconda3](https://www.anaconda.com/)
- [git](https://git-scm.com/downloads)
3. Clone the [InvokeAI](https://github.com/invoke-ai/InvokeAI) source code from
GitHub:
```bash
git clone https://github.com/invoke-ai/InvokeAI.git
```
This will create InvokeAI folder where you will follow the rest of the
steps.
4. Enter the newly-created InvokeAI folder:
```bash
cd InvokeAI
```
From this step forward make sure that you are working in the InvokeAI
directory!
5. Select the appropriate environment file:
We have created a series of environment files suited for different operating
systems and GPU hardware. They are located in the
`environments-and-requirements` directory:
<figure markdown>
| filename | OS |
| :----------------------: | :----------------------------: |
| environment-lin-amd.yml | Linux with an AMD (ROCm) GPU |
| environment-lin-cuda.yml | Linux with an NVIDIA CUDA GPU |
| environment-mac.yml | Macintosh |
| environment-win-cuda.yml | Windows with an NVIDIA CUDA GPU |
</figure>
Choose the appropriate environment file for your system and link or copy it
to `environment.yml` in InvokeAI's top-level directory. To do so, run
following command from the repository-root:
!!! Example ""
=== "Macintosh and Linux"
!!! todo "Replace `xxx` and `yyy` with the appropriate OS and GPU codes as seen in the table above"
```bash
ln -sf environments-and-requirements/environment-xxx-yyy.yml environment.yml
```
When this is done, confirm that a file `environment.yml` has been linked in
the InvokeAI root directory and that it points to the correct file in the
`environments-and-requirements`.
```bash
ls -la
```
=== "Windows"
!!! todo " Since it requires admin privileges to create links, we will use the copy command to create your `environment.yml`"
```cmd
copy environments-and-requirements\environment-win-cuda.yml environment.yml
```
Afterwards verify that the file `environment.yml` has been created, either via the
explorer or by using the command `dir` from the terminal
```cmd
dir
```
!!! warning "Do not try to run conda on directly on the subdirectory environments file. This won't work. Instead, copy or link it to the top-level directory as shown."
6. Create the conda environment:
```bash
conda env update
```
This will create a new environment named `invokeai` and install all InvokeAI
dependencies into it. If something goes wrong you should take a look at
[troubleshooting](#troubleshooting).
7. Activate the `invokeai` environment:
In order to use the newly created environment you will first need to
activate it
```bash
conda activate invokeai
```
Your command-line prompt should change to indicate that `invokeai` is active
by prepending `(invokeai)`.
8. Pre-Load the model weights files:
!!! tip
If you have already downloaded the weights file(s) for another Stable
Diffusion distribution, you may skip this step (by selecting "skip" when
prompted) and configure InvokeAI to use the previously-downloaded files. The
process for this is described in [here](050_INSTALLING_MODELS.md).
```bash
python scripts/configure_invokeai.py
```
The script `configure_invokeai.py` will interactively guide you through the
process of downloading and installing the weights files needed for InvokeAI.
Note that the main Stable Diffusion weights file is protected by a license
agreement that you have to agree to. The script will list the steps you need
to take to create an account on the site that hosts the weights files,
accept the agreement, and provide an access token that allows InvokeAI to
legally download and install the weights files.
If you get an error message about a module not being installed, check that
the `invokeai` environment is active and if not, repeat step 5.
9. Run the command-line- or the web- interface:
!!! example ""
!!! warning "Make sure that the conda environment is activated, which should create `(invokeai)` in front of your prompt!"
=== "CLI"
```bash
python scripts/invoke.py
```
=== "local Webserver"
```bash
python scripts/invoke.py --web
```
=== "Public Webserver"
```bash
python scripts/invoke.py --web --host 0.0.0.0
```
If you choose to run the web interface, point your browser at
http://localhost:9090 in order to load the GUI.
10. Render away!
Browse the [features](../features/CLI.md) section to learn about all the things you
can do with InvokeAI.
Note that some GPUs are slow to warm up. In particular, when using an AMD
card with the ROCm driver, you may have to wait for over a minute the first
time you try to generate an image. Fortunately, after the warm up period
rendering will be fast.
11. Subsequently, to relaunch the script, be sure to run "conda activate
invokeai", enter the `InvokeAI` directory, and then launch the invoke
script. If you forget to activate the 'invokeai' environment, the script
will fail with multiple `ModuleNotFound` errors.
## Updating to newer versions of the script
This distribution is changing rapidly. If you used the `git clone` method
(step 5) to download the InvokeAI directory, then to update to the latest and
greatest version, launch the Anaconda window, enter `InvokeAI` and type:
```bash
git pull
conda env update
python scripts/configure_invokeai.py --no-interactive #optional
```
This will bring your local copy into sync with the remote one. The last step may
be needed to take advantage of new features or released models. The
`--no-interactive` flag will prevent the script from prompting you to download
the big Stable Diffusion weights files.
## pip Install
To install InvokeAI with only the PIP package manager, please follow these
steps:
1. Make sure you are using Python 3.9 or higher. The rest of the install
procedure depends on this:
```bash
python -V
```
2. Install the `virtualenv` tool if you don't have it already:
```bash
pip install virtualenv
```
3. From within the InvokeAI top-level directory, create and activate a virtual
environment named `invokeai`:
```bash
virtualenv invokeai
source invokeai/bin/activate
```
4. Run PIP
```bash
pip --python invokeai install --use-pep517 .
```
---
## Troubleshooting
Here are some common issues and their suggested solutions.
### Conda
#### Conda fails before completing `conda update`
The usual source of these errors is a package incompatibility. While we have
tried to minimize these, over time packages get updated and sometimes introduce
incompatibilities.
We suggest that you search
[Issues](https://github.com/invoke-ai/InvokeAI/issues) or the "bugs-and-support"
channel of the [InvokeAI Discord](https://discord.gg/ZmtBAhwWhy).
You may also try to install the broken packages manually using PIP. To do this,
activate the `invokeai` environment, and run `pip install` with the name and
version of the package that is causing the incompatibility. For example:
```bash
pip install test-tube==0.7.5
```
You can keep doing this until all requirements are satisfied and the `invoke.py`
script runs without errors. Please report to
[Issues](https://github.com/invoke-ai/InvokeAI/issues) what you were able to do
to work around the problem so that others can benefit from your investigation.
### Create Conda Environment fails on MacOS
If creating the conda environment fails with an lmdb error, this is most likely caused by Clang.
Run `brew config` to see which Clang is installed on your Mac. If Clang isn't installed, that's causing the error.
Start by installing the additional Xcode command line tools, followed by `brew install llvm`.
```bash
xcode-select --install
brew install llvm
```
If `brew config` shows that Clang is installed, update to the latest llvm and try creating the environment again.
#### `configure_invokeai.py` or `invoke.py` crashes at an early stage
This is usually due to an incomplete or corrupted Conda install. Make sure you
have linked to the correct environment file and run `conda update` again.
If the problem persists, a more extreme measure is to clear Conda's caches and
remove the `invokeai` environment:
```bash
conda deactivate
conda env remove -n invokeai
conda clean -a
conda update
```
This removes all cached library files, including ones that may have been
corrupted somehow. (This is not supposed to happen, but does anyway).
#### `invoke.py` crashes at a later stage
If the CLI or web site had been working ok, but something unexpected happens
later on during the session, you've encountered a code bug that is probably
unrelated to an install issue. Please search
[Issues](https://github.com/invoke-ai/InvokeAI/issues), file a bug report, or
ask for help on [Discord](https://discord.gg/ZmtBAhwWhy)
#### My renders are running very slowly
You may have installed the wrong torch (machine learning) package, and the
system is running on CPU rather than the GPU. To check, look at the log messages
that appear when `invoke.py` is first starting up. One of the earlier lines
should say `Using device type cuda`. On AMD systems, it will also say "cuda",
and on Macintoshes, it should say "mps". If instead the message says it is
running on "cpu", then you may need to install the correct torch library.
You may be able to fix this by installing a different torch library. Here are
the magic incantations for Conda and PIP.
!!! todo "For CUDA systems"
- conda
```bash
conda install pytorch torchvision torchaudio pytorch-cuda=11.6 -c pytorch -c nvidia
```
- pip
```bash
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
```
!!! todo "For AMD systems"
- conda
```bash
conda activate invokeai
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
```
- pip
```bash
pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2/
```
More information and troubleshooting tips can be found at https://pytorch.org.


@@ -129,7 +129,14 @@ class Installer:
else:
venv_dir = self.dest / ".venv"
venv.create(venv_dir, with_pip=True)
# Prefer to copy python executables
# so that updates to system python don't break InvokeAI
try:
venv.create(venv_dir, with_pip=True)
# If installing over an existing environment previously created with symlinks,
# the executables will fail to copy. Keep symlinks in that case
except shutil.SameFileError:
venv.create(venv_dir, with_pip=True, symlinks=True)
# upgrade pip in Python 3.9 environments
if int(platform.python_version_tuple()[1]) == 9:


@@ -50,7 +50,7 @@ def main():
Globals.internet_available = args.internet_available and check_internet()
Globals.disable_xformers = not args.xformers
Globals.ckpt_convert = args.ckpt_convert
print(f'>> Internet connectivity is {Globals.internet_available}')
if not args.conf:
@@ -1111,9 +1111,13 @@ def write_commands(opt, file_path:str, outfilepath:str):
def report_model_error(opt:Namespace, e:Exception):
print(f'** An error occurred while attempting to initialize the model: "{str(e)}"')
print('** This can be caused by a missing or corrupted models file, and can sometimes be fixed by (re)installing the models.')
response = input('Do you want to run invokeai-configure script to select and/or reinstall models? [y] ')
if response.startswith(('n','N')):
return
yes_to_all = os.environ.get('INVOKE_MODEL_RECONFIGURE')
if yes_to_all:
print('** Reconfiguration is being forced by environment variable INVOKE_MODEL_RECONFIGURE')
else:
response = input('Do you want to run invokeai-configure script to select and/or reinstall models? [y] ')
if response.startswith(('n', 'N')):
return
print('invokeai-configure is launching....\n')
@@ -1121,13 +1125,13 @@ def report_model_error(opt:Namespace, e:Exception):
# only the arguments accepted by the configuration script are parsed
root_dir = ["--root", opt.root_dir] if opt.root_dir is not None else []
config = ["--config", opt.conf] if opt.conf is not None else []
yes_to_all = os.environ.get('INVOKE_MODEL_RECONFIGURE')
previous_args = sys.argv
sys.argv = [ 'invokeai-configure' ]
sys.argv.extend(root_dir)
sys.argv.extend(config)
if yes_to_all is not None:
sys.argv.append(yes_to_all)
for arg in yes_to_all.split():
sys.argv.append(arg)
from ldm.invoke.config import configure_invokeai
configure_invokeai.main()
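This mirrors the `ENV INVOKE_MODEL_RECONFIGURE="--yes --default_only"` line in the Dockerfile earlier in this commit: inside the container, a model error now triggers a non-interactive reconfigure, with the variable's contents forwarded to `invokeai-configure` as arguments. A sketch of forcing the same behaviour outside Docker:

```bash
# Any flags in the variable are split on whitespace and passed through
# to invokeai-configure when a model error is reported
INVOKE_MODEL_RECONFIGURE="--yes --default_only" invokeai --web
```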


@@ -530,8 +530,8 @@ def update_config_file(successfully_downloaded: dict, opt: dict):
# this check is ignored if opt.config_file is specified - user is assumed to know what they
# are doing if they are passing a custom config file from elsewhere.
if config_file is Default_config_file and not config_file.parent.exists():
configs_src = Path(__file__).parent / "configs"
configs_dest = Path(Globals.root) / "configs"
configs_src = Dataset_path.parent
configs_dest = Default_config_file.parent
shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)
yaml = new_config_file_contents(successfully_downloaded, config_file)


@@ -5,6 +5,7 @@ used to merge 2-3 models together and create a new InvokeAI-registered diffusion
Copyright (c) 2023 Lincoln Stein and the InvokeAI Development Team
"""
import argparse
import curses
import os
import sys
from argparse import Namespace
@@ -12,6 +13,7 @@ from pathlib import Path
from typing import List, Union
import npyscreen
import warnings
from diffusers import DiffusionPipeline
from omegaconf import OmegaConf
@@ -26,7 +28,6 @@ from ldm.invoke.model_manager import ModelManager
DEST_MERGED_MODEL_DIR = "merged_models"
def merge_diffusion_models(
model_ids_or_paths: List[Union[str, Path]],
alpha: float = 0.5,
@@ -185,6 +186,8 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
def __init__(self, parentApp, name):
self.parentApp = parentApp
self.ALLOW_RESIZE=True
self.FIX_MINIMUM_SIZE_WHEN_CREATED=False
super().__init__(parentApp, name)
@property
@@ -195,29 +198,94 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
self.parentApp.setNextForm(None)
def create(self):
window_height,window_width=curses.initscr().getmaxyx()
self.model_names = self.get_model_names()
max_width = max([len(x) for x in self.model_names])
max_width += 6
horizontal_layout = max_width*3 < window_width
self.add_widget_intelligent(
npyscreen.FixedText, name="Select up to three models to merge", value=""
npyscreen.FixedText,
color='CONTROL',
value=f"Select two models to merge and optionally a third.",
editable=False,
)
self.models = self.add_widget_intelligent(
npyscreen.TitleMultiSelect,
name="Select two to three models to merge:",
self.add_widget_intelligent(
npyscreen.FixedText,
color='CONTROL',
value=f"Use up and down arrows to move, <space> to select an item, <tab> and <shift-tab> to move from one field to the next.",
editable=False,
)
self.add_widget_intelligent(
npyscreen.FixedText,
value='MODEL 1',
color='GOOD',
editable=False,
rely=4 if horizontal_layout else None,
)
self.model1 = self.add_widget_intelligent(
npyscreen.SelectOne,
values=self.model_names,
value=None,
max_height=len(self.model_names) + 1,
value=0,
max_height=len(self.model_names),
max_width=max_width,
scroll_exit=True,
rely=5,
)
self.add_widget_intelligent(
npyscreen.FixedText,
value='MODEL 2',
color='GOOD',
editable=False,
relx=max_width+3 if horizontal_layout else None,
rely=4 if horizontal_layout else None,
)
self.model2 = self.add_widget_intelligent(
npyscreen.SelectOne,
name='(2)',
values=self.model_names,
value=1,
max_height=len(self.model_names),
max_width=max_width,
relx=max_width+3 if horizontal_layout else None,
rely=5 if horizontal_layout else None,
scroll_exit=True,
)
self.models.when_value_edited = self.models_changed
self.add_widget_intelligent(
npyscreen.FixedText,
value='MODEL 3',
color='GOOD',
editable=False,
relx=max_width*2+3 if horizontal_layout else None,
rely=4 if horizontal_layout else None,
)
models_plus_none = self.model_names.copy()
models_plus_none.insert(0,'None')
self.model3 = self.add_widget_intelligent(
npyscreen.SelectOne,
name='(3)',
values=models_plus_none,
value=0,
max_height=len(self.model_names)+1,
max_width=max_width,
scroll_exit=True,
relx=max_width*2+3 if horizontal_layout else None,
rely=5 if horizontal_layout else None,
)
for m in [self.model1,self.model2,self.model3]:
m.when_value_edited = self.models_changed
self.merged_model_name = self.add_widget_intelligent(
npyscreen.TitleText,
name="Name for merged model:",
labelColor='CONTROL',
value="",
scroll_exit=True,
)
self.force = self.add_widget_intelligent(
npyscreen.Checkbox,
name="Force merge of incompatible models",
labelColor='CONTROL',
value=False,
scroll_exit=True,
)
@@ -226,6 +294,7 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
name="Merge Method:",
values=self.interpolations,
value=0,
labelColor='CONTROL',
max_height=len(self.interpolations) + 1,
scroll_exit=True,
)
@@ -236,47 +305,53 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
step=0.05,
lowest=0,
value=0.5,
labelColor='CONTROL',
scroll_exit=True,
)
self.models.editing = True
self.model1.editing = True
def models_changed(self):
model_names = self.models.values
selected_models = self.models.value
if len(selected_models) > 3:
npyscreen.notify_confirm(
"Too many models selected for merging. Select two to three."
)
return
elif len(selected_models) > 2:
self.merge_method.values = ["add_difference"]
self.merge_method.value = 0
models = self.model1.values
selected_model1 = self.model1.value[0]
selected_model2 = self.model2.value[0]
selected_model3 = self.model3.value[0]
merged_model_name = f'{models[selected_model1]}+{models[selected_model2]}'
self.merged_model_name.value = merged_model_name
if selected_model3 > 0:
self.merge_method.values = ['add_difference']
self.merged_model_name.value += f'+{models[selected_model3]}'
else:
self.merge_method.values = self.interpolations
self.merged_model_name.value = "+".join(
[model_names[x] for x in selected_models]
)
self.merge_method.values=self.interpolations
self.merge_method.value=0
def on_ok(self):
if self.validate_field_values() and self.check_for_overwrite():
self.parentApp.setNextForm(None)
self.editing = False
self.parentApp.merge_arguments = self.marshall_arguments()
npyscreen.notify("Starting the merge...")
npyscreen.notify('Starting the merge...')
else:
self.editing = True
def on_cancel(self):
sys.exit(0)
def marshall_arguments(self) -> dict:
models = [self.models.values[x] for x in self.models.value]
def marshall_arguments(self)->dict:
model_names = self.model_names
models = [
model_names[self.model1.value[0]],
model_names[self.model2.value[0]],
]
if self.model3.value[0] > 0:
models.append(model_names[self.model3.value[0]-1])
args = dict(
models=models,
alpha=self.alpha.value,
interp=self.interpolations[self.merge_method.value[0]],
force=self.force.value,
merged_model_name=self.merged_model_name.value,
alpha = self.alpha.value,
interp = self.interpolations[self.merge_method.value[0]],
force = self.force.value,
merged_model_name = self.merged_model_name.value,
)
return args
@@ -289,15 +364,18 @@ class mergeModelsForm(npyscreen.FormMultiPageAction):
f"The chosen merged model destination, {model_out}, is already in use. Overwrite?"
)
def validate_field_values(self) -> bool:
def validate_field_values(self)->bool:
bad_fields = []
selected_models = self.models.value
if len(selected_models) < 2 or len(selected_models) > 3:
bad_fields.append("Please select two or three models to merge.")
model_names = self.model_names
selected_models = set((model_names[self.model1.value[0]],model_names[self.model2.value[0]]))
if self.model3.value[0] > 0:
selected_models.add(model_names[self.model3.value[0]-1])
if len(selected_models) < 2:
bad_fields.append(f'Please select two or three DIFFERENT models to merge. You selected {selected_models}')
if len(bad_fields) > 0:
message = "The following problems were detected and must be corrected:"
message = 'The following problems were detected and must be corrected:'
for problem in bad_fields:
message += f"\n* {problem}"
message += f'\n* {problem}'
npyscreen.notify_confirm(message)
return False
else:
@@ -322,10 +400,9 @@ class Mergeapp(npyscreen.NPSAppManaged):
) # precision doesn't really matter here
def onStart(self):
npyscreen.setTheme(npyscreen.Themes.DefaultTheme)
npyscreen.setTheme(npyscreen.Themes.ElegantTheme)
self.main = self.addForm("MAIN", mergeModelsForm, name="Merge Models Settings")
def run_gui(args: Namespace):
mergeapp = Mergeapp()
mergeapp.run()
@@ -338,8 +415,8 @@ def run_gui(args: Namespace):
def run_cli(args: Namespace):
assert args.alpha >= 0 and args.alpha <= 1.0, "alpha must be between 0 and 1"
assert (
len(args.models) >= 1 and len(args.models) <= 3
), "provide 2 or 3 models to merge"
args.models and len(args.models) >= 2 and len(args.models) <= 3
), "Please provide the --models argument to list 2 to 3 models to merge. Use --help for full usage."
if not args.merged_model_name:
args.merged_model_name = "+".join(args.models)
@@ -353,6 +430,7 @@ def run_cli(args: Namespace):
), f'A model named "{args.merged_model_name}" already exists. Use --clobber to overwrite.'
merge_diffusion_models_and_commit(**vars(args))
print(f'>> Models merged into new model: "{args.merged_model_name}".')
def main():
@@ -365,17 +443,22 @@ def main():
] = cache_dir # because not clear the merge pipeline is honoring cache_dir
args.cache_dir = cache_dir
try:
if args.front_end:
run_gui(args)
else:
run_cli(args)
print(f">> Conversion successful. New model is named {args.merged_model_name}")
except Exception as e:
print(f"** An error occurred while merging the pipelines: {str(e)}")
sys.exit(-1)
except KeyboardInterrupt:
sys.exit(-1)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
try:
if args.front_end:
run_gui(args)
else:
run_cli(args)
print('>> Conversion successful.')
except Exception as e:
if str(e).startswith('Not enough space'):
print('** Not enough horizontal space! Try making the window wider, or relaunch with a smaller starting size.')
else:
print(f"** An error occurred while merging the pipelines: {str(e)}")
sys.exit(-1)
except KeyboardInterrupt:
sys.exit(-1)
if __name__ == "__main__":
main()
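For reference, a hedged CLI invocation of the merge script (assuming the console entry point is named `invokeai-merge` and that the flag spellings match the argparse namespace used above; the model names are placeholders that must match entries in your `models.yaml`):

```bash
# Weighted 50/50 merge of two registered models; add --clobber to
# overwrite an existing merged model of the same name
invokeai-merge \
  --models modelA modelB \
  --merged_model_name "modelA+modelB" \
  --alpha 0.5
```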