mirror of
https://github.com/invoke-ai/InvokeAI
f4004f660e
* toil(reqs): split requirements to per-platform
* toil(reqs): fix for Win and Lin... ...allow pip to resolve latest torch, numpy
* toil(install): update reqs in Win install notebook

Signed-off-by: Ben Alkov <ben.alkov@gmail.com>
33 lines
919 B
Plaintext
albumentations==0.4.3
einops==0.3.0
huggingface-hub==0.8.1
imageio-ffmpeg==0.4.2
imageio==2.9.0
kornia==0.6.0
# pip will resolve the version which matches torch
numpy
omegaconf==2.1.1
opencv-python==4.6.0.66
pillow==9.2.0
pip>=22
pudb==2019.2
pytorch-lightning==1.4.2
streamlit==1.12.0
# "CompVis/taming-transformers" doesn't work
# ldm\models\autoencoder.py", line 6, in <module>
# from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
# ModuleNotFoundError
taming-transformers-rom1504==0.0.6
test-tube>=0.7.5
torch-fidelity==0.3.0
torchmetrics==0.6.0
transformers==4.19.2
git+https://github.com/openai/CLIP.git@main#egg=clip
git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion
# No CUDA in PyPi builds
--extra-index-url https://download.pytorch.org/whl/cu113 --trusted-host https://download.pytorch.org
torch==1.11.0
# Same as numpy - let pip do its thing
torchvision
-e .
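After installing from this file, a quick sanity check can confirm the two points the comments above call out: the rom1504 fork of taming-transformers actually provides the import that fails with the upstream CompVis package, and the torch 1.11.0 wheel was resolved from the cu113 extra index rather than the CPU-only PyPI build. The snippet below is a minimal illustrative sketch, not part of the requirements file.

# Sanity check for this requirements set (illustrative, not part of the file).
import torch

# This import is the reason taming-transformers-rom1504 is pinned;
# it raises ModuleNotFoundError with the upstream "CompVis/taming-transformers" package.
from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer  # noqa: F401

# With the cu113 extra index, the version string is expected to include a CUDA tag
# (e.g. "1.11.0+cu113"); a plain "1.11.0" suggests the CPU-only build was installed.
print("torch", torch.__version__)
print("CUDA available:", torch.cuda.is_available())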