Merge remote-tracking branch 'origin/main' into dev/diffusers

Commit a0eb30a9b9 (mirror of https://github.com/invoke-ai/InvokeAI)
.dockerignore (file name not visible in the capture; inferred from the ignore patterns):

@@ -1,12 +1,26 @@
 *
 !backend
-!configs
-!environments-and-requirements
 !frontend
-!installer
+!binary_installer
 !ldm
 !main.py
 !scripts
 !server
 !static
 !setup.py
+!docker-build
+!docs
+docker-build/Dockerfile
+
+# Guard against pulling in any models that might exist in the directory tree
+**/*.pt*
+
+# unignore configs, but only ignore the custom models.yaml, in case it exists
+!configs
+configs/models.yaml
+
+# unignore environment dirs/files, but ignore the environment.yml file or symlink in case it exists
+!environment*
+environment.yml
+
+**/__pycache__
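A quick way to verify what these rules actually ship in the build context, as a throwaway sketch (not part of the commit; the temp-file path and busybox base image are arbitrary choices):

```bash
# Build a disposable image whose only job is to list the context it received;
# the repo's .dockerignore applies because the context is the repo root (.)
printf 'FROM busybox\nCOPY . /ctx\nRUN find /ctx -maxdepth 2\n' > /tmp/ctx-check.Dockerfile
DOCKER_BUILDKIT=1 docker build --progress=plain --no-cache -f /tmp/ctx-check.Dockerfile .
# Expect to see backend/, frontend/, configs/, etc., but no **/*.pt* model files
```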
|
64
.github/workflows/build-cloud-img.yml
vendored
Normal file
64
.github/workflows/build-cloud-img.yml
vendored
Normal file
@ -0,0 +1,64 @@
|
|||||||
|
name: Build and push cloud image
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
- development
|
||||||
|
tags:
|
||||||
|
- v*
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
|
|
||||||
|
env:
|
||||||
|
REGISTRY: ghcr.io
|
||||||
|
IMAGE_NAME: ${{ github.repository }}
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
docker:
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
# only x86_64 for now. aarch64+cuda isn't really a thing yet
|
||||||
|
arch:
|
||||||
|
- x86_64
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
name: ${{ matrix.arch }}
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Docker meta
|
||||||
|
id: meta
|
||||||
|
uses: docker/metadata-action@v4
|
||||||
|
with:
|
||||||
|
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||||
|
tags: |
|
||||||
|
type=ref,event=branch
|
||||||
|
type=ref,event=tag
|
||||||
|
type=ref,event=pr
|
||||||
|
type=sha
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v2
|
||||||
|
|
||||||
|
# - if: github.event_name != 'pull_request'
|
||||||
|
# name: Docker login
|
||||||
|
# uses: docker/login-action@v2
|
||||||
|
# with:
|
||||||
|
# registry: ghcr.io
|
||||||
|
# username: ${{ github.actor }}
|
||||||
|
# password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Build and push cloud image
|
||||||
|
uses: docker/build-push-action@v3
|
||||||
|
with:
|
||||||
|
context: .
|
||||||
|
file: docker-build/Dockerfile.cloud
|
||||||
|
platforms: Linux/${{ matrix.arch }}
|
||||||
|
# push: ${{ github.event_name != 'pull_request' }}
|
||||||
|
push: false
|
||||||
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
|
labels: ${{ steps.meta.outputs.labels }}
|
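Since the workflow declares a `workflow_dispatch` trigger, it can also be started by hand; a sketch assuming the GitHub CLI (`gh`) is installed and authenticated for the repo:

```bash
# Kick off a manual build on main, then watch the resulting run
gh workflow run build-cloud-img.yml --ref main
gh run list --workflow=build-cloud-img.yml
```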
README.md (55 lines changed)

@@ -1,11 +1,9 @@
 <div align="center">
 
-![project logo](docs/assets/logo.png)
+![project logo](docs/assets/invoke_ai_banner.png)
 
 # InvokeAI: A Stable Diffusion Toolkit
 
-_Formerly known as lstein/stable-diffusion_
-
 [![discord badge]][discord link]
 
 [![latest release badge]][latest release link] [![github stars badge]][github stars link] [![github forks badge]][github forks link]

@@ -48,7 +46,10 @@ _Note: InvokeAI is rapidly evolving. Please use the
 [Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
 requests. Be sure to use the provided templates. They will help us diagnose issues faster._
 
-## Installation Quick-Start
+# Getting Started with InvokeAI
+
+For full installation and upgrade instructions, please see:
+[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)
 
 1. Go to the bottom of the [Latest Release Page](https://github.com/invoke-ai/InvokeAI/releases/tag/v2.2.3)
 2. Download the .zip file for your OS (Windows/macOS/Linux).

@@ -62,8 +63,6 @@
 <div align="center"><img src="docs/assets/invoke-web-server-1.png" width=640></div>
 
 
-For full installation and upgrade instructions, please see:
-[InvokeAI Installation Overview](https://invoke-ai.github.io/InvokeAI/installation/)
 
 ## Table of Contents
 
@@ -118,11 +117,12 @@ Similarly, specify full-precision mode on Apple M1 hardware.
 
 Precision is auto configured based on the device. If however you encounter
 errors like 'expected type Float but found Half' or 'not implemented for Half'
-you can try starting `invoke.py` with the `--precision=float32` flag:
+you can try starting `invoke.py` by adding the `--precision=float32` flag to your initialization command:
 
 ```bash
 (invokeai) ~/InvokeAI$ python scripts/invoke.py --precision=float32
 ```
+Alternatively, add this argument to your InvokeAI configuration file.
 
 ### Features
 
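The configuration file referenced by the new line is, presumably, the InvokeAI init file, which holds a plain list of command-line switches read at startup; a minimal sketch assuming it lives at `~/.invokeai`:

```bash
# Make float32 precision the default for every launch (the path is an assumption)
echo '--precision=float32' >> ~/.invokeai
```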
@@ -151,39 +151,7 @@
 
 ### Latest Changes
 
-- v2.0.1 (13 November 2022)
-  - fix noisy images at high step count when using k* samplers
-  - dream.py script now calls invoke.py module directly rather than
-    via a new python process (which could break the environment)
-
-- v2.0.0 (9 November 2022)
-
-  - `dream.py` script renamed `invoke.py`. A `dream.py` script wrapper remains
-    for backward compatibility.
-  - Completely new WebGUI - launch with `python3 scripts/invoke.py --web`
-  - Support for <a href="https://invoke-ai.github.io/InvokeAI/features/INPAINTING/">inpainting</a> and <a href="https://invoke-ai.github.io/InvokeAI/features/OUTPAINTING/">outpainting</a>
-  - img2img runs on all k* samplers
-  - Support for <a href="https://invoke-ai.github.io/InvokeAI/features/PROMPTS/#negative-and-unconditioned-prompts">negative prompts</a>
-  - Support for CodeFormer face reconstruction
-  - Support for Textual Inversion on macOS
-  - Support in both WebGUI and CLI for <a href="https://invoke-ai.github.io/InvokeAI/features/POSTPROCESS/">post-processing of previously-generated images</a>
-    using facial reconstruction, ESRGAN upscaling, outcropping (similar to DALL-E infinite canvas),
-    and "embiggen" upscaling. See the `!fix` command.
-  - New `--hires` option on `invoke>` line allows <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/#txt2img">larger images to be created without duplicating elements</a>, at the cost of some performance.
-  - New `--perlin` and `--threshold` options allow you to add and control variation
-    during image generation (see <a href="https://github.com/invoke-ai/InvokeAI/blob/main/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options">Thresholding and Perlin Noise Initialization</a>
-  - Extensive metadata now written into PNG files, allowing reliable regeneration of images
-    and tweaking of previous settings.
-  - Command-line completion in `invoke.py` now works on Windows, Linux and macOS platforms.
-  - Improved <a href="https://invoke-ai.github.io/InvokeAI/features/CLI/">command-line completion behavior</a>.
-    New commands added:
-    - List command-line history with `!history`
-    - Search command-line history with `!search`
-    - Clear history with `!clear`
-  - Deprecated `--full_precision` / `-F`. Simply omit it and `invoke.py` will auto
-    configure. To switch away from auto use the new flag like `--precision=float32`.
-
-For older changelogs, please visit the **[CHANGELOG](https://invoke-ai.github.io/InvokeAI/CHANGELOG#v114-11-september-2022)**.
+For our latest changes, view our [Release Notes](https://github.com/invoke-ai/InvokeAI/releases)
 
 ### Troubleshooting
 

@@ -193,8 +161,9 @@ problems and other issues.
 # Contributing
 
 Anyone who wishes to contribute to this project, whether documentation, features, bug fixes, code
-cleanup, testing, or code reviews, is very much encouraged to do so. To join, just raise your hand on the InvokeAI
-Discord server or discussion board.
+cleanup, testing, or code reviews, is very much encouraged to do so.
+
+To join, just raise your hand on the InvokeAI Discord server (#dev-chat) or the GitHub discussion board.
 
 If you are unfamiliar with how
 to contribute to GitHub projects, here is a
Shell launcher script (file name not visible in the capture):

@@ -2,6 +2,10 @@
 
 set -eu
 
+# ensure we're in the correct folder in case user's CWD is somewhere else
+scriptdir=$(dirname "$0")
+cd "$scriptdir"
+
 . .venv/bin/activate
 
 # set required env var for torch on mac MPS
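To see why the added `dirname`/`cd` pair makes the launcher independent of the caller's working directory, a small illustration with a hypothetical install path:

```bash
# Suppose the script is installed at /opt/invokeai/invoke.sh (hypothetical)
# and the user runs it from somewhere else entirely:
cd /tmp && /opt/invokeai/invoke.sh
# Inside the script, $0 is /opt/invokeai/invoke.sh, so:
scriptdir=$(dirname "$0")   # -> /opt/invokeai
cd "$scriptdir"             # now ". .venv/bin/activate" resolves relative to the script
```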
YAML training config (file name not visible in the capture; the keys suggest a textual-inversion config):

@@ -32,7 +32,7 @@ model:
 placeholder_strings: ["*"]
 initializer_words: ['sculpture']
 per_image_tokens: false
-num_vectors_per_token: 8
+num_vectors_per_token: 1
 progressive_words: False
 
 unet_config:
docker-build/Dockerfile.cloud (new file, 86 lines)

@@ -0,0 +1,86 @@
+#######################
+#### Builder stage ####
+
+FROM library/ubuntu:22.04 AS builder
+
+ARG DEBIAN_FRONTEND=noninteractive
+RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
+RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
+    --mount=type=cache,target=/var/lib/apt,sharing=locked \
+    apt update && apt-get install -y \
+        git \
+        libglib2.0-0 \
+        libgl1-mesa-glx \
+        python3-venv \
+        python3-pip \
+        build-essential \
+        python3-opencv \
+        libopencv-dev
+
+# This is needed for patchmatch support
+RUN cd /usr/lib/x86_64-linux-gnu/pkgconfig/ &&\
+    ln -sf opencv4.pc opencv.pc
+
+ARG WORKDIR=/invokeai
+WORKDIR ${WORKDIR}
+
+ENV VIRTUAL_ENV=${WORKDIR}/.venv
+ENV PATH="$VIRTUAL_ENV/bin:$PATH"
+
+RUN --mount=type=cache,target=/root/.cache/pip \
+    python3 -m venv ${VIRTUAL_ENV} &&\
+    pip install --extra-index-url https://download.pytorch.org/whl/cu116 \
+        torch==1.12.0+cu116 \
+        torchvision==0.13.0+cu116 &&\
+    pip install -e git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch
+
+COPY . .
+RUN --mount=type=cache,target=/root/.cache/pip \
+    cp binary_installer/py3.10-linux-x86_64-cuda-reqs.txt requirements.txt && \
+    pip install -r requirements.txt &&\
+    pip install -e .
+
+
+#######################
+#### Runtime stage ####
+
+FROM library/ubuntu:22.04 as runtime
+
+ARG DEBIAN_FRONTEND=noninteractive
+ENV PYTHONUNBUFFERED=1
+RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
+    --mount=type=cache,target=/var/lib/apt,sharing=locked \
+    apt update && apt install -y --no-install-recommends \
+        git \
+        curl \
+        ncdu \
+        iotop \
+        bzip2 \
+        libglib2.0-0 \
+        libgl1-mesa-glx \
+        python3-venv \
+        python3-pip \
+        build-essential \
+        python3-opencv \
+        libopencv-dev &&\
+    apt-get clean && apt-get autoclean
+
+ARG WORKDIR=/invokeai
+WORKDIR ${WORKDIR}
+
+ENV INVOKEAI_ROOT=/mnt/invokeai
+ENV VIRTUAL_ENV=${WORKDIR}/.venv
+ENV PATH="$VIRTUAL_ENV/bin:$PATH"
+
+COPY --from=builder ${WORKDIR} ${WORKDIR}
+COPY --from=builder /usr/lib/x86_64-linux-gnu/pkgconfig /usr/lib/x86_64-linux-gnu/pkgconfig
+
+# build patchmatch
+RUN python -c "from patchmatch import patch_match"
+
+## workaround for non-existent initfile when runtime directory is mounted; see #1613
+RUN touch /root/.invokeai
+
+ENTRYPOINT ["bash"]
+
+CMD ["-c", "python3 scripts/invoke.py --web --host 0.0.0.0"]
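Note that the `--mount=type=cache` instructions require BuildKit. Building from the repository root is equivalent to the Makefile's `make build` target below; a sketch (the tag mirrors the Makefile's default):

```bash
# The build context is the repo root, so .dockerignore governs what is sent
DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest \
    -f docker-build/Dockerfile.cloud .
```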
docker-build/Makefile (new file, 44 lines)

@@ -0,0 +1,44 @@
+# Directory in the container where the INVOKEAI_ROOT (runtime dir) will be mounted
+INVOKEAI_ROOT=/mnt/invokeai
+# Host directory to contain the runtime dir. Will be mounted at INVOKEAI_ROOT path in the container
+HOST_MOUNT_PATH=${HOME}/invokeai
+
+IMAGE=local/invokeai:latest
+
+USER=$(shell id -u)
+GROUP=$(shell id -g)
+
+# All downloaded models, config, etc will end up in ${HOST_MOUNT_PATH} on the host.
+# This is consistent with the expected non-Docker behaviour.
+# Contents can be moved to a persistent storage and used to prime the cache on another host.
+
+build:
+	DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..
+
+configure:
+	docker run --rm -it --runtime=nvidia --gpus=all \
+		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
+		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
+		${IMAGE} -c "python scripts/configure_invokeai.py"
+
+# Run the container with the runtime dir mounted and the web server exposed on port 9090
+web:
+	docker run --rm -it --runtime=nvidia --gpus=all \
+		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
+		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
+		-p 9090:9090 \
+		${IMAGE} -c "python scripts/invoke.py --web --host 0.0.0.0"
+
+# Run the cli with the runtime dir mounted
+cli:
+	docker run --rm -it --runtime=nvidia --gpus=all \
+		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} \
+		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
+		${IMAGE} -c "python scripts/invoke.py"
+
+# Run the container with the runtime dir mounted and open a bash shell
+shell:
+	docker run --rm -it --runtime=nvidia --gpus=all \
+		-v ${HOST_MOUNT_PATH}:${INVOKEAI_ROOT} ${IMAGE} --
+
+.PHONY: build configure web cli shell
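Because these are ordinary Make variables, they can be overridden per invocation without editing the file; for example, to keep the runtime dir (models, configs) on a dedicated data disk (hypothetical path):

```bash
make configure HOST_MOUNT_PATH=/data/invokeai
make web HOST_MOUNT_PATH=/data/invokeai   # web UI on http://localhost:9090
```

(Note that the `build` target hardcodes the `local/invokeai:latest` tag rather than using `${IMAGE}`, so overriding `IMAGE` only affects the run targets.)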
Docs page with front-matter title "Docker" (file path not visible in the capture):

@@ -6,7 +6,7 @@ title: Docker
 
 !!! warning "For end users"
 
-    We highly recommend to Install InvokeAI locally using [these instructions](index.md)"
+    We highly recommend to Install InvokeAI locally using [these instructions](index.md)
 
 !!! tip "For developers"
 

@@ -16,6 +16,10 @@ title: Docker
 
 For general use, install locally to leverage your machine's GPU.
 
+!!! tip "For running on a cloud instance/service"
+
+    Check out the [Running InvokeAI in the cloud with Docker](#running-invokeai-in-the-cloud-with-docker) section below
+
 ## Why containers?
 
 They provide a flexible, reliable way to build and deploy InvokeAI. You'll also

@@ -36,7 +40,7 @@
 laptop you can build for the target platform and architecture and deploy to
 another environment with NVIDIA GPUs on-premises or in the cloud.
 
-## Installation on a Linux container
+## Installation in a Linux container (desktop)
 
 ### Prerequisites
 

@@ -117,12 +121,91 @@ also do so.
 ./docker-build/run.sh "banana sushi" -Ak_lms -S42 -s10
 ```
 
 This would generate the legendary "banana sushi" with Seed 42, k_lms Sampler and 10 steps.
 
 Find out more about available CLI-Parameters at [features/CLI.md](../../features/CLI/#arguments)
 
 ---
 
+## Running InvokeAI in the cloud with Docker
+
+We offer an optimized Ubuntu-based image that has been well-tested in cloud deployments. Note: it also works well locally on Linux x86_64 systems with an Nvidia GPU. It *may* also work on Windows under WSL2 and on Intel Mac (not tested).
+
+An advantage of this method is that it does not need any local setup or additional dependencies.
+
+See the `docker-build/Dockerfile.cloud` file to familiarize yourself with the image's content.
+
+### Prerequisites
+
+- a `docker` runtime
+- `make` (optional but helps for convenience)
+- Huggingface token to download models, or an existing InvokeAI runtime directory from a previous installation
+
+Neither local Python nor any dependencies are required. If you don't have `make` (part of `build-essential` on Ubuntu), or do not wish to install it, the commands from the `docker-build/Makefile` are readily adaptable to be executed directly.
+
+### Building and running the image locally
+
+1. Clone this repo and `cd docker-build`
+1. `make build` - this will build the image. (This does *not* require a GPU-capable system).
+1. _(skip this step if you already have a complete InvokeAI runtime directory)_
+    - `make configure` (This does *not* require a GPU-capable system)
+    - this will create a local cache of models and configs (a.k.a. the _runtime dir_)
+    - enter your Huggingface token when prompted
+1. `make web`
+1. Open the `http://localhost:9090` URL in your browser, and enjoy the banana sushi!
+
+To use InvokeAI on the cli, run `make cli`. To open a Bash shell in the container for arbitrary advanced use, `make shell`.
+
+#### Building and running without `make`
+
+(Feel free to adapt paths such as `${HOME}/invokeai` to your liking, and modify the CLI arguments as necessary).
+
+!!! example "Build the image and configure the runtime directory"
+
+    ```Shell
+    cd docker-build
+
+    DOCKER_BUILDKIT=1 docker build -t local/invokeai:latest -f Dockerfile.cloud ..
+
+    docker run --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/configure_invokeai.py"
+    ```
+
+!!! example "Run the web server"
+
+    ```Shell
+    docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai -p9090:9090 local/invokeai:latest
+    ```
+
+    Access the Web UI at http://localhost:9090
+
+!!! example "Run the InvokeAI interactive CLI"
+
+    ```
+    docker run --runtime=nvidia --gpus=all --rm -it -v ${HOME}/invokeai:/mnt/invokeai local/invokeai:latest -c "python scripts/invoke.py"
+    ```
+
+### Running the image in the cloud
+
+This image works anywhere you can run a container with a mounted Docker volume. You may either build this image on a cloud instance, or build and push it to your Docker registry. To manually run this on a cloud instance (such as AWS EC2, GCP or Azure VM):
+
+1. build this image either in the cloud (you'll need to pull the repo), or locally
+1. `docker tag` it as `your-registry/invokeai` and push it to your registry (e.g. Docker Hub)
+1. `docker pull` it on your cloud instance
+1. configure the runtime directory as per the example above, using the `docker run ... configure_invokeai.py` command
+1. use either one of the `docker run` commands above, substituting the image name for your own image.
+
+To run this on Runpod, please refer to the following Runpod template: https://www.runpod.io/console/gpu-secure-cloud?template=vm19ukkycf (you need a Runpod subscription). When launching the template, feel free to set the image to pull your own build.
+
+The template's `README` provides ample detail, but at a high level, the process is as follows:
+
+1. create a pod using this Docker image
+1. ensure the pod has an `INVOKEAI_ROOT=<path_to_your_persistent_volume>` environment variable, and that it corresponds to the path to your pod's persistent volume mount
+1. run the pod with `sleep infinity` as the Docker command
+1. use Runpod basic SSH to connect to the pod, and run the `python scripts/configure_invokeai.py` script
+1. stop the pod, and change the Docker command to `python scripts/invoke.py --web --host 0.0.0.0`
+1. run the pod again, connect to your pod on HTTP port 9090, and enjoy the banana sushi!
+
+Running on other cloud providers such as Vast.ai will likely work in a similar fashion.
+
+---
+
 !!! warning "Deprecated"
 
 From here on you will find the the previous Docker-Docs, which will still
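The registry round-trip described in the manual cloud steps above might look like the following sketch; `your-registry` is a placeholder:

```bash
# On the build machine
docker tag local/invokeai:latest your-registry/invokeai:latest
docker push your-registry/invokeai:latest

# On the cloud instance
docker pull your-registry/invokeai:latest
docker run --runtime=nvidia --gpus=all --rm -it \
    -v ${HOME}/invokeai:/mnt/invokeai -p 9090:9090 \
    your-registry/invokeai:latest
```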
[In the following three hunks the old and new lines are textually identical in the capture; the changes appear to be whitespace-only, consistent with the legacy Docker docs being re-indented under the "Deprecated" admonition above.]

@@ -135,12 +218,12 @@ also do so.
 If you're on a **Linux container** the `invoke` script is **automatically
 started** and the output dir set to the Docker volume you created earlier.
 
 If you're **directly on macOS follow these startup instructions**.
 With the Conda environment activated (`conda activate ldm`), run the interactive
 interface that combines the functionality of the original scripts `txt2img` and
 `img2img`:
 Use the more accurate but VRAM-intensive full precision math because
 half-precision requires autocast and won't work.
 By default the images are saved in `outputs/img-samples/`.
 
 ```Shell

@@ -157,8 +240,8 @@ invoke> q
 ### Text to Image
 
 For quick (but bad) image results test with 5 steps (default 50) and 1 sample
 image. This will let you know that everything is set up correctly.
 Then increase steps to 100 or more for good (but slower) results.
 The prompt can be in quotes or not.
 
 ```Shell

@@ -172,8 +255,8 @@ You'll need to experiment to see if face restoration is making it better or
 worse for your specific prompt.
 
 If you're on a container the output is set to the Docker volume. You can copy it
 wherever you want.
 You can download it from the Docker Desktop app, Volumes, my-vol, data.
 Or you can copy it from your Mac terminal. Keep in mind `docker cp` can't expand
 `*.png` so you'll need to specify the image file name.
 
Unnamed Python script (file name not visible in the capture; likely the CLI entry script):

@@ -69,6 +69,8 @@ def main():
     if opt.embeddings:
         if not os.path.isabs(opt.embedding_path):
             embedding_path = os.path.normpath(os.path.join(Globals.root,opt.embedding_path))
+        else:
+            embedding_path = opt.embedding_path
     else:
         embedding_path = None
 
Unnamed Python module (file name not visible in the capture; contains the CLI argument definitions):

@@ -121,7 +121,7 @@ PRECISION_CHOICES = [
 
 # is there a way to pick this up during git commits?
 APP_ID = 'invoke-ai/InvokeAI'
-APP_VERSION = 'v2.2.0'
+APP_VERSION = 'v2.2.3'
 
 class ArgFormatter(argparse.RawTextHelpFormatter):
     # use defined argument order to display usage