remove original setup.py

Commit 7473d814f5 by Lincoln Stein, 2023-01-24 09:11:05 -05:00
11 changed files with 239 additions and 264 deletions


@ -8,141 +8,133 @@ on:
- 'ready_for_review'
- 'opened'
- 'synchronize'
- 'converted_to_draft'
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
# fail_if_pull_request_is_draft:
# if: github.event.pull_request.draft == true && github.head_ref != 'dev/diffusers'
# runs-on: ubuntu-18.04
# steps:
# - name: Fails in order to indicate that pull request needs to be marked as ready to review and unit tests workflow needs to pass.
# run: exit 1
matrix:
if: github.event.pull_request.draft == false || github.head_ref == 'dev/diffusers'
if: github.event.pull_request.draft == false
strategy:
matrix:
stable-diffusion-model:
- stable-diffusion-1.5
requirements-file:
- requirements-lin-cuda.txt
- requirements-lin-amd.txt
- requirements-mac-mps-cpu.txt
- requirements-win-colab-cuda.txt
python-version:
# - '3.9'
- '3.10'
pytorch:
- linux-cuda-11_6
- linux-cuda-11_7
- linux-rocm-5_2
- linux-cpu
- macos-default
- windows-cpu
- windows-cuda-11_6
- windows-cuda-11_7
include:
- requirements-file: requirements-lin-cuda.txt
- pytorch: linux-cuda-11_6
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/cu116'
github-env: $GITHUB_ENV
- pytorch: linux-cuda-11_7
os: ubuntu-22.04
github-env: $GITHUB_ENV
- requirements-file: requirements-lin-amd.txt
- pytorch: linux-rocm-5_2
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/rocm5.2'
github-env: $GITHUB_ENV
- requirements-file: requirements-mac-mps-cpu.txt
- pytorch: linux-cpu
os: ubuntu-22.04
extra-index-url: 'https://download.pytorch.org/whl/cpu'
github-env: $GITHUB_ENV
- pytorch: macos-default
os: macOS-12
github-env: $GITHUB_ENV
- requirements-file: requirements-win-colab-cuda.txt
- pytorch: windows-cpu
os: windows-2022
github-env: $env:GITHUB_ENV
name: ${{ matrix.requirements-file }} on ${{ matrix.python-version }}
- pytorch: windows-cuda-11_6
os: windows-2022
extra-index-url: 'https://download.pytorch.org/whl/cu116'
github-env: $env:GITHUB_ENV
- pytorch: windows-cuda-11_7
os: windows-2022
extra-index-url: 'https://download.pytorch.org/whl/cu117'
github-env: $env:GITHUB_ENV
name: ${{ matrix.pytorch }} on ${{ matrix.python-version }}
runs-on: ${{ matrix.os }}
env:
INVOKE_MODEL_RECONFIGURE: '--yes'
INVOKEAI_ROOT: '${{ github.workspace }}/invokeai'
PYTHONUNBUFFERED: 1
HAVE_SECRETS: ${{ secrets.HUGGINGFACE_TOKEN != '' }}
steps:
- name: Checkout sources
id: checkout-sources
uses: actions/checkout@v3
- name: set INVOKEAI_ROOT Windows
if: matrix.os == 'windows-2022'
run: |
echo "INVOKEAI_ROOT=${{ github.workspace }}\invokeai" >> ${{ matrix.github-env }}
echo "INVOKEAI_OUTDIR=${{ github.workspace }}\invokeai\outputs" >> ${{ matrix.github-env }}
- name: set INVOKEAI_ROOT others
if: matrix.os != 'windows-2022'
run: |
echo "INVOKEAI_ROOT=${{ github.workspace }}/invokeai" >> ${{ matrix.github-env }}
echo "INVOKEAI_OUTDIR=${{ github.workspace }}/invokeai/outputs" >> ${{ matrix.github-env }}
- name: Use Cached diffusers-1.5
id: cache-sd-model
uses: actions/cache@v3
env:
cache-name: huggingface-${{ matrix.stable-diffusion-model }}
- name: setup python
uses: actions/setup-python@v4
with:
path: |
${{ env.INVOKEAI_ROOT }}/models/runwayml
${{ env.INVOKEAI_ROOT }}/models/stabilityai
${{ env.INVOKEAI_ROOT }}/models/CompVis
key: ${{ env.cache-name }}
python-version: ${{ matrix.python-version }}
- name: Set Cache-Directory Windows
if: runner.os == 'Windows'
id: set-cache-dir-windows
run: |
echo "CACHE_DIR=$HOME\invokeai\models" >> ${{ matrix.github-env }}
echo "PIP_NO_CACHE_DIR=1" >> ${{ matrix.github-env }}
- name: Set Cache-Directory others
if: runner.os != 'Windows'
id: set-cache-dir-others
run: echo "CACHE_DIR=$HOME/invokeai/models" >> ${{ matrix.github-env }}
- name: set test prompt to main branch validation
if: ${{ github.ref == 'refs/heads/main' }}
run: echo "TEST_PROMPTS=tests/preflight_prompts.txt" >> ${{ matrix.github-env }}
- name: set test prompt to development branch validation
if: ${{ github.ref == 'refs/heads/development' }}
run: echo "TEST_PROMPTS=tests/dev_prompts.txt" >> ${{ matrix.github-env }}
- name: set test prompt to Pull Request validation
if: ${{ github.ref != 'refs/heads/main' && github.ref != 'refs/heads/development' }}
if: ${{ github.ref != 'refs/heads/main' }}
run: echo "TEST_PROMPTS=tests/validate_pr_prompt.txt" >> ${{ matrix.github-env }}
- name: create requirements.txt
run: cp 'environments-and-requirements/${{ matrix.requirements-file }}' '${{ matrix.requirements-file }}'
- name: install invokeai
run: pip3 install --use-pep517 -e .
env:
PIP_EXTRA_INDEX_URL: ${{ matrix.extra-index-url }}
- name: setup python
uses: actions/setup-python@v4
- name: Use Cached models
id: cache-sd-model
uses: actions/cache@v3
env:
cache-name: huggingface-models
with:
python-version: ${{ matrix.python-version }}
# cache: 'pip'
# cache-dependency-path: ${{ matrix.requirements-file }}
path: ${{ env.CACHE_DIR }}
key: ${{ env.cache-name }}
enableCrossOsArchive: true
- name: install dependencies
run: pip3 install --upgrade pip setuptools wheel
- name: install requirements
run: pip3 install -r '${{ matrix.requirements-file }}'
- name: run configure_invokeai.py
- name: run configure_invokeai
id: run-preload-models
env:
HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGINGFACE_TOKEN }}
run: >
configure_invokeai.py
configure_invokeai
--yes
--default_only
--full-precision # can't use fp16 weights without a GPU
--full-precision
# can't use fp16 weights without a GPU
- name: Run the tests
if: runner.os != 'Windows'
id: run-tests
if: matrix.os != 'windows-2022'
env:
# Set offline mode to make sure configure preloaded successfully.
HF_HUB_OFFLINE: 1
HF_DATASETS_OFFLINE: 1
TRANSFORMERS_OFFLINE: 1
run: >
python3 scripts/invoke.py
invoke
--no-patchmatch
--no-nsfw_checker
--model ${{ matrix.stable-diffusion-model }}
--from_file ${{ env.TEST_PROMPTS }}
--root="${{ env.INVOKEAI_ROOT }}"
--outdir="${{ env.INVOKEAI_OUTDIR }}"
- name: Archive results
id: archive-results
if: matrix.os != 'windows-2022'
uses: actions/upload-artifact@v3
with:
name: results_${{ matrix.requirements-file }}_${{ matrix.python-version }}
name: results_${{ matrix.pytorch }}_${{ matrix.python-version }}
path: ${{ env.INVOKEAI_ROOT }}/outputs
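Taken together, these workflow changes drop the per-platform `requirements-*.txt` selection and exercise the new `pyproject.toml`-based install directly. A rough local equivalent of what one matrix job now runs (a sketch, using the `linux-cuda-11_6` entry; the extra index URL varies per matrix entry):

```bash
# Sketch of a single matrix job: install InvokeAI via pyproject.toml, then smoke-test it
export INVOKEAI_ROOT="$PWD/invokeai"
export INVOKEAI_OUTDIR="$INVOKEAI_ROOT/outputs"

pip3 install --upgrade pip setuptools wheel
PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cu116" \
    pip3 install --use-pep517 -e .

# Console entry points come from [project.scripts] in pyproject.toml
# (CI also provides HUGGING_FACE_HUB_TOKEN so the model download can succeed)
configure_invokeai --yes --default_only --full-precision  # can't use fp16 weights without a GPU

invoke --no-patchmatch --no-nsfw_checker \
    --model stable-diffusion-1.5 \
    --from_file tests/validate_pr_prompt.txt \
    --root="$INVOKEAI_ROOT" --outdir="$INVOKEAI_OUTDIR"
```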


@ -1,6 +1,6 @@
<div align="center">
![project logo](docs/assets/invoke_ai_banner.png)
![project logo](https://github.com/mauwii/InvokeAI/raw/main/docs/assets/invoke_ai_banner.png)
# InvokeAI: A Stable Diffusion Toolkit
@ -28,6 +28,7 @@
[latest commit to main link]: https://github.com/invoke-ai/InvokeAI/commits/main
[latest release badge]: https://flat.badgen.net/github/release/invoke-ai/InvokeAI/development?icon=github
[latest release link]: https://github.com/invoke-ai/InvokeAI/releases
</div>
InvokeAI is a leading creative engine built to empower professionals and enthusiasts alike. Generate and create stunning visual media using the latest AI-driven technologies. InvokeAI offers an industry leading Web Interface, interactive Command Line Interface, and also serves as the foundation for multiple commercial products.
@ -38,8 +39,11 @@ _Note: InvokeAI is rapidly evolving. Please use the
[Issues](https://github.com/invoke-ai/InvokeAI/issues) tab to report bugs and make feature
requests. Be sure to use the provided templates. They will help us diagnose issues faster._
<div align="center">
![canvas preview](docs/assets/canvas_preview.png)
![canvas preview](https://github.com/mauwii/InvokeAI/raw/main/docs/assets/canvas_preview.png)
</div>
# Getting Started with InvokeAI
@ -81,6 +85,7 @@ instructions, please see:
InvokeAI is supported across Linux, Windows and macOS. Linux
users can use either an Nvidia-based card (with CUDA support) or an
AMD card (using the ROCm driver).
#### System
You will need one of the following:
@ -105,18 +110,23 @@ to render 512x512 images.
Feature documentation can be reviewed by navigating to [the InvokeAI Documentation page](https://invoke-ai.github.io/InvokeAI/features/)
### *Web Server & UI*
InvokeAI offers a locally hosted Web Server & React Frontend, with an industry leading user experience. The Web-based UI allows for simple and intuitive workflows, and is responsive for use on mobile devices and tablets accessing the web server.
### *Unified Canvas*
The Unified Canvas is a fully integrated canvas implementation with support for all core generation capabilities, in/outpainting, brush tools, and more. This creative tool unlocks the capability for artists to create with AI as a creative collaborator, and can be used to augment AI-generated imagery, sketches, photography, renders, and more.
### *Advanced Prompt Syntax*
InvokeAI's advanced prompt syntax allows for token weighting, cross-attention control, and prompt blending, allowing for fine-tuned tweaking of your invocations and exploration of the latent space.
### *Command Line Interface*
For users utilizing a terminal-based environment, or who want to take advantage of CLI features, InvokeAI offers an extensive and actively supported command-line interface that provides the full suite of generation functionality available in the tool.
### Other features
- *Support for both ckpt and diffusers models*
- *SD 2.0, 2.1 support*
- *Noise Control & Thresholding*
@ -126,6 +136,7 @@ For users utilizing a terminal-based environment, or who want to take advantage
- *Model Manager & Support*
### Coming Soon
- *Node-Based Architecture & UI*
- And more...


@ -119,10 +119,8 @@ manager, please follow these steps:
6. Run PIP
Be sure that the `invokeai` environment is active before doing this:
```bash
pip install --prefer-binary -r requirements.txt
pip --python invokeai install --use-pep517 .
```
7. Set up the runtime directory
@ -137,7 +135,7 @@ manager, please follow these steps:
default to `invokeai` in your home directory.
```bash
configure_invokeai.py --root_dir ~/Programs/invokeai
configure_invokeai --root_dir ~/Programs/invokeai
```
The script `configure_invokeai.py` will interactively guide you through the
@ -452,7 +450,7 @@ time. Note that this method only works with the PIP method.
step.
3. Run one additional step while you are in the source code repository
directory `pip install .` (note the dot at the end).
directory `pip install --use-pep517 .` (note the dot at the end).
4. That's all! Now, whenever you activate the virtual environment,
`invoke.py` will know where to look for the runtime directory without
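A note on the `pip --python invokeai install --use-pep517 .` form used above: the `--python` option runs pip against the named environment rather than the interpreter pip itself was launched from, and it requires pip 22.3 or newer (the same floor `pyproject.toml` declares in its build requirements). A minimal end-to-end sketch, assuming a fresh checkout and the environment layout described in this section:

```bash
# Create the environment, install InvokeAI into it, then set up the runtime directory
python -m venv invokeai
pip --python invokeai install --use-pep517 .   # needs pip >= 22.3 on the invoking side
source invokeai/bin/activate
configure_invokeai --root_dir ~/Programs/invokeai
```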


@ -18,7 +18,13 @@ Windows systems with no extra intervention.
## Macintosh
PyPatchMatch is not currently supported, but the team is working on it.
You need to have opencv installed so that pypatchmatch can be built:
```bash
brew install opencv
```
The next time you start `invoke`, after successfully installing opencv, pypatchmatch will be built.
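To confirm the build without launching the full application, a minimal check along these lines should work once opencv is in place (a sketch, not part of the official instructions):

```bash
# Triggers the pypatchmatch build on first import and prints a confirmation
python -c "from patchmatch import patch_match; print('patchmatch OK')"
```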
## Linux
@ -39,23 +45,16 @@ Prior to installing PyPatchMatch, you need to take the following steps:
sudo apt install python3-opencv libopencv-dev
```
3. Fix the naming of the `opencv` package configuration file:
```sh
cd /usr/lib/x86_64-linux-gnu/pkgconfig/
ln -sf opencv4.pc opencv.pc
```
4. Activate the environment you use for invokeai, either with `conda` or with a
3. Activate the environment you use for invokeai, either with `conda` or with a
virtual environment.
5. Install pypatchmatch:
4. Install pypatchmatch:
```sh
pip install "git+https://github.com/invoke-ai/PyPatchMatch@0.1.3#egg=pypatchmatch"
pip install pypatchmatch
```
6. Confirm that pypatchmatch is installed. At the command-line prompt enter
5. Confirm that pypatchmatch is installed. At the command-line prompt enter
`python`, and then at the `>>>` line type
`from patchmatch import patch_match`: It should look like the following:


@ -254,65 +254,10 @@ steps:
source invokeai/bin/activate
```
4. Pick the correct `requirements*.txt` file for your hardware and operating
system.
We have created a series of environment files suited for different operating
systems and GPU hardware. They are located in the
`environments-and-requirements` directory:
<figure markdown>
| filename | OS |
| :---------------------------------: | :-------------------------------------------------------------: |
| requirements-lin-amd.txt | Linux with an AMD (ROCm) GPU |
| requirements-lin-arm64.txt | Linux running on arm64 systems |
| requirements-lin-cuda.txt | Linux with an NVIDIA (CUDA) GPU |
| requirements-mac-mps-cpu.txt | Macintoshes with MPS acceleration |
| requirements-lin-win-colab-cuda.txt | Windows with an NVIDIA (CUDA) GPU<br>(supports Google Colab too) |
</figure>
Select the appropriate requirements file, and make a link to it from
`requirements.txt` in the top-level InvokeAI directory. The command to do
this from the top-level directory is:
!!! example ""
=== "Macintosh and Linux"
!!! info "Replace `xxx` and `yyy` with the appropriate OS and GPU codes."
4. Run PIP
```bash
ln -sf environments-and-requirements/requirements-xxx-yyy.txt requirements.txt
```
=== "Windows"
!!! info "on Windows, admin privileges are required to make links, so we use the copy command instead"
```cmd
copy environments-and-requirements\requirements-lin-win-colab-cuda.txt requirements.txt
```
!!! warning
Please do not link or copy `environments-and-requirements/requirements-base.txt`.
This is a base requirements file that does not have the platform-specific
libraries. Also, be sure to link or copy the platform-specific file to
a top-level file named `requirements.txt` as shown here. Running pip on
a requirements file in a subdirectory will not work as expected.
When this is done, confirm that a file named `requirements.txt` has been
created in the InvokeAI root directory and that it points to the correct
file in `environments-and-requirements`.
5. Run PIP
Be sure that the `invokeai` environment is active before doing this:
```bash
pip install --prefer-binary -r requirements.txt
pip --python invokeai install --use-pep517 .
```
---


@ -1099,7 +1099,7 @@ def report_model_error(opt:Namespace, e:Exception):
if yes_to_all is not None:
sys.argv.append(yes_to_all)
import configure_invokeai
import ldm.invoke.configure_invokeai as configure_invokeai
configure_invokeai.main()
print('** InvokeAI will now restart')
sys.argv = previous_args

pyproject.toml (new file, 130 lines)

@ -0,0 +1,130 @@
[build-system]
requires = ["setuptools~=65.5", "pip~=22.3", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "InvokeAI"
description = "An implementation of Stable Diffusion which provides various new features and options to aid the image generation process"
requires-python = ">=3.9, <3.11"
readme = { content-type = "text/markdown", file = "README.md" }
keywords = ["stable-diffusion", "AI"]
dynamic = ["version"]
license = { file = "LICENSE" }
authors = [{ name = "The InvokeAI Project", email = "lincoln.stein@gmail.com" }]
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: GPU',
'Environment :: GPU :: NVIDIA CUDA',
'Environment :: MacOS X',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Artistic Software',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
'Topic :: Multimedia :: Graphics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Image Processing',
]
dependencies = [
"accelerate",
"albumentations",
"clip_anytorch", # replaceing "clip @ https://github.com/openai/CLIP/archive/eaa22acb90a5876642d0507623e859909230a52d.zip",
"clipseg @ https://github.com/invoke-ai/clipseg/archive/relaxed-python-requirement.zip", # is this still necesarry with diffusers?
"datasets",
"diffusers[torch]~=0.11",
"dnspython==2.2.1",
"einops",
"eventlet",
"facexlib",
"flask==2.1.3",
"flask_cors==3.0.10",
"flask_socketio==5.3.0",
"flaskwebgui==1.0.3",
"getpass_asterisk",
"gfpgan==1.3.8",
"huggingface-hub>=0.11.1",
"imageio",
"imageio-ffmpeg",
"k-diffusion", # replaceing "k-diffusion @ https://github.com/Birch-san/k-diffusion/archive/refs/heads/mps.zip",
"kornia",
"npyscreen",
"numpy~=1.23",
"omegaconf",
"opencv-python",
"picklescan",
"pillow",
"pudb",
"pypatchmatch",
"pyreadline3",
"pytorch-lightning==1.7.7",
"realesrgan",
"requests==2.25.1",
"safetensors",
"scikit-image>=0.19",
"send2trash",
"streamlit",
"taming-transformers-rom1504",
"test-tube>=0.7.5",
"torch>=1.13.1",
"torch-fidelity",
"torchvision>=0.14.1",
"torchmetrics",
"transformers~=4.25",
"windows-curses; sys_platform=='win32'",
]
[project.optional-dependencies]
"dist" = ["pip-tools", "pipdeptree", "twine"]
"docs" = [
"mkdocs-material<9.0",
"mkdocs-git-revision-date-localized-plugin",
"mkdocs-redirects==1.2.0",
]
test = ["pytest>6.0.0", "pytest-cov"]
[project.scripts]
"configure_invokeai" = "ldm.invoke.configure_invokeai:main"
"dream" = "ldm.invoke:CLI.main"
"invoke" = "ldm.invoke:CLI.main"
"legacy_api" = "scripts:legacy_api.main"
"load_models" = "scripts:configure_invokeai.main"
"merge_embeddings" = "scripts:merge_embeddings.main"
"preload_models" = "ldm.invoke.configure_invokeai:main"
[project.urls]
"Homepage" = "https://invoke-ai.github.io/InvokeAI/"
"Documentation" = "https://invoke-ai.github.io/InvokeAI/"
"Source" = "https://github.com/invoke-ai/InvokeAI/"
"Bug Reports" = "https://github.com/invoke-ai/InvokeAI/issues"
"Discord" = "https://discord.gg/ZmtBAhwWhy"
[tool.setuptools.dynamic]
version = { attr = "ldm.invoke.__version__" }
[tool.setuptools.packages.find]
"where" = ["."]
"include" = ["assets", "backend*", "configs*", "frontend.dist*", "ldm*"]
[tool.setuptools.package-data]
"assets" = ["caution.png"]
"backend" = ["**.png"]
"configs" = ["*.example", "**/*.yaml", "*.txt"]
"frontend.dist" = ["**"]
[tool.setuptools.exclude-package-data]
configs = ["models.yaml"]
[tool.pytest.ini_options]
minversion = "6.0"
addopts = "-p pytest_cov --junitxml=junit/test-results.xml --cov=./ldm/ --cov=./backend --cov-branch"
python_files = ["test_*.py"]
pythonpath = [".venv/lib/python3.9", ".venv/lib/python3.10"]
testpaths = ["tests"]
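The `[project.scripts]` table above takes over from the `scripts=[...]` list in the deleted `setup.py`: installing the project now generates `configure_invokeai`, `invoke`, `dream`, and the other console commands automatically. A rough sketch of exercising the new packaging, using the optional-dependency group names defined above:

```bash
# Editable install with the test extras, then run the entry points and the test suite
pip install --use-pep517 -e ".[test]"
configure_invokeai --yes --default_only
invoke --help
pytest   # picks up [tool.pytest.ini_options] from pyproject.toml
```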


@ -5,7 +5,7 @@
# two machines must share a common .cache directory.
import warnings
import configure_invokeai
import ldm.invoke.configure_invokeai as configure_invokeai
if __name__ == '__main__':
configure_invokeai.main()


@ -5,7 +5,7 @@
# two machines must share a common .cache directory.
import warnings
import configure_invokeai
import ldm.invoke.configure_invokeai as configure_invokeai
if __name__ == '__main__':
configure_invokeai.main()
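This wrapper, like the identical one a few lines above, now simply delegates to `ldm.invoke.configure_invokeai.main()`; the console entry points declared in `pyproject.toml` resolve to the same function, so the legacy scripts and the packaged commands stay in sync. A sketch of the equivalent invocations (flags as used in the CI workflow earlier in this commit):

```bash
# Both entry points map to ldm.invoke.configure_invokeai:main in [project.scripts]
configure_invokeai --yes --default_only
preload_models --yes --default_only   # kept as a backwards-compatible alias
```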

setup.py (deleted, 100 lines)

@ -1,100 +0,0 @@
import sys
import os
import re
from setuptools import setup, find_packages
def list_files(directory):
listing = list()
for root, dirs, files in os.walk(directory,topdown=False):
pair = (root,[os.path.join(root,f) for f in files])
listing.append(pair)
return listing
def get_version()->str:
from ldm.invoke import __version__ as version
return version
# The canonical version number is stored in the file ldm/invoke/_version.py
VERSION = get_version()
DESCRIPTION = ('An implementation of Stable Diffusion which provides various new features'
' and options to aid the image generation process')
LONG_DESCRIPTION = ('This version of Stable Diffusion features a slick WebGUI, an'
' interactive command-line script that combines text2img and img2img'
' functionality in a "dream bot" style interface, and multiple features'
' and other enhancements.')
HOMEPAGE = 'https://github.com/invoke-ai/InvokeAI'
FRONTEND_FILES = list_files('frontend/dist')
FRONTEND_FILES.append(('assets',['assets/caution.png']))
print(FRONTEND_FILES)
REQUIREMENTS=[
'accelerate',
'albumentations',
'diffusers',
'eventlet',
'flask_cors',
'flask_socketio',
'flaskwebgui',
'getpass_asterisk',
'imageio-ffmpeg',
'pyreadline3',
'realesrgan',
'send2trash',
'streamlit',
'taming-transformers-rom1504',
'test-tube',
'torch-fidelity',
'torch',
'torchvision',
'transformers',
'picklescan',
'clip',
'clipseg',
'gfpgan',
'k-diffusion',
'pypatchmatch',
]
setup(
name='InvokeAI',
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author='The InvokeAI Project',
author_email='lincoln.stein@gmail.com',
url=HOMEPAGE,
license='MIT',
packages=find_packages(exclude=['tests.*']),
install_requires=REQUIREMENTS,
dependency_links=['https://download.pytorch.org/whl/torch_stable.html'],
python_requires='>=3.9, <4',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: GPU',
'Environment :: GPU :: NVIDIA CUDA',
'Environment :: MacOS X',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 3 :: Only,'
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Artistic Software',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
'Topic :: Multimedia :: Graphics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Image Processing',
],
scripts = ['scripts/invoke.py','scripts/configure_invokeai.py', 'scripts/sd-metadata.py',
'scripts/preload_models.py', 'scripts/images2prompt.py',
'scripts/textual_inversion_fe.py','scripts/textual_inversion.py',
'scripts/merge_models_fe.py', 'scripts/merge_models.py',
],
data_files=FRONTEND_FILES,
)