Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
TOIL(requirements): Split requirements to per-platform (#355)
* toil(reqs): split requirements to per-platform
  Signed-off-by: Ben Alkov <ben.alkov@gmail.com>
* toil(reqs): fix for Win and Lin... allow pip to resolve latest torch, numpy
  Signed-off-by: Ben Alkov <ben.alkov@gmail.com>
* toil(install): update reqs in Win install notebook
  Signed-off-by: Ben Alkov <ben.alkov@gmail.com>

Signed-off-by: Ben Alkov <ben.alkov@gmail.com>
This commit is contained in:
parent 4406fd138d
commit f4004f660e
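With requirements now split per platform, installation reduces to pointing pip at the file matching your OS. A minimal sketch (the file names come from this commit; the virtual-environment step is an assumed convention, not part of the change):

    # optional but recommended: isolate the install (assumed convention)
    python -m venv .venv
    # activate it: "source .venv/bin/activate" on Linux, ".venv\Scripts\activate" on Windows

    # install the platform-specific requirements added by this commit
    pip install -r requirements-lin.txt    # Linux
    pip install -r requirements-win.txt    # Windows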
Windows install notebook (requirements cell):

@@ -65,25 +65,31 @@
 "imageio-ffmpeg==0.4.2\n",
 "imageio==2.9.0\n",
 "kornia==0.6.0\n",
+"# pip will resolve the version which matches torch\n",
+"numpy\n",
 "omegaconf==2.1.1\n",
 "opencv-python==4.6.0.66\n",
 "pillow==9.2.0\n",
+"pip>=22\n",
 "pudb==2019.2\n",
 "pytorch-lightning==1.4.2\n",
 "streamlit==1.12.0\n",
-"# Regular \"taming-transformers\" doesn't seem to work\n",
+"# \"CompVis/taming-transformers\" doesn't work\n",
+"# ldm\\models\\autoencoder.py\", line 6, in <module>\n",
+"# from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer\n",
+"# ModuleNotFoundError\n",
 "taming-transformers-rom1504==0.0.6\n",
 "test-tube>=0.7.5\n",
 "torch-fidelity==0.3.0\n",
 "torchmetrics==0.6.0\n",
-"torchvision==0.12.0\n",
 "transformers==4.19.2\n",
 "git+https://github.com/openai/CLIP.git@main#egg=clip\n",
 "git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion\n",
 "# No CUDA in PyPi builds\n",
-"torch@https://download.pytorch.org/whl/cu113/torch-1.11.0%2Bcu113-cp310-cp310-win_amd64.whl\n",
-"# No MKL in PyPi builds (faster, more robust than OpenBLAS)\n",
-"numpy@https://download.lfd.uci.edu/pythonlibs/archived/numpy-1.22.4+mkl-cp310-cp310-win_amd64.whl\n",
+"--extra-index-url https://download.pytorch.org/whl/cu113 --trusted-host https://download.pytorch.org\n",
+"torch==1.11.0\n",
+"# Same as numpy - let pip do its thing\n",
+"torchvision\n",
 "-e .\n"
 ]
 },
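The hard-coded Windows wheel URLs for torch and numpy are replaced with a plain torch==1.11.0 pin plus an extra package index, and numpy/torchvision are left for pip to resolve against that pin. A rough command-line equivalent of those requirements lines, for illustration only (the cu113 index URL and version come from the diff above; the "+cu113" build tag is an assumption about what pip will select on a CUDA-capable Windows machine):

    # roughly what the changed requirements lines ask pip to do
    pip install --extra-index-url https://download.pytorch.org/whl/cu113 torch==1.11.0 torchvision

    # sanity check that a CUDA-enabled wheel was selected; expected output is
    # something like "1.11.0+cu113 True" on a machine with a CUDA GPU
    python -c "import torch; print(torch.__version__, torch.cuda.is_available())"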
requirements-lin.txt (new file, 32 lines):

@@ -0,0 +1,32 @@
+albumentations==0.4.3
+einops==0.3.0
+huggingface-hub==0.8.1
+imageio-ffmpeg==0.4.2
+imageio==2.9.0
+kornia==0.6.0
+# pip will resolve the version which matches torch
+numpy
+omegaconf==2.1.1
+opencv-python==4.6.0.66
+pillow==9.2.0
+pip>=22
+pudb==2019.2
+pytorch-lightning==1.4.2
+streamlit==1.12.0
+# "CompVis/taming-transformers" doesn't work
+# ldm\models\autoencoder.py", line 6, in <module>
+# from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
+# ModuleNotFoundError
+taming-transformers-rom1504==0.0.6
+test-tube>=0.7.5
+torch-fidelity==0.3.0
+torchmetrics==0.6.0
+transformers==4.19.2
+git+https://github.com/openai/CLIP.git@main#egg=clip
+git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion
+# No CUDA in PyPi builds
+--extra-index-url https://download.pytorch.org/whl/cu113 --trusted-host https://download.pytorch.org
+torch==1.11.0
+# Same as numpy - let pip do its thing
+torchvision
+-e .
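Per the comments in the file above, numpy and torchvision are deliberately left unpinned so pip resolves versions compatible with the torch pin. A quick way to see what was actually chosen after an install (a sketch; the grep/findstr filtering is illustrative and resolved versions will vary by date):

    # show what pip resolved for the unpinned packages
    pip show numpy torchvision | grep -E "^(Name|Version):"    # Linux
    pip show numpy torchvision | findstr "Name: Version:"      # Windows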
requirements-win.txt (new file, 32 lines):

@@ -0,0 +1,32 @@
+albumentations==0.4.3
+einops==0.3.0
+huggingface-hub==0.8.1
+imageio-ffmpeg==0.4.2
+imageio==2.9.0
+kornia==0.6.0
+# pip will resolve the version which matches torch
+numpy
+omegaconf==2.1.1
+opencv-python==4.6.0.66
+pillow==9.2.0
+pip>=22
+pudb==2019.2
+pytorch-lightning==1.4.2
+streamlit==1.12.0
+# "CompVis/taming-transformers" doesn't work
+# ldm\models\autoencoder.py", line 6, in <module>
+# from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
+# ModuleNotFoundError
+taming-transformers-rom1504==0.0.6
+test-tube>=0.7.5
+torch-fidelity==0.3.0
+torchmetrics==0.6.0
+transformers==4.19.2
+git+https://github.com/openai/CLIP.git@main#egg=clip
+git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion
+# No CUDA in PyPi builds
+--extra-index-url https://download.pytorch.org/whl/cu113 --trusted-host https://download.pytorch.org
+torch==1.11.0
+# Same as numpy - let pip do its thing
+torchvision
+-e .
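Both new files end with "-e .", which pip treats as an editable install of the repository itself, so the repository's own code (the ldm package mentioned in the comments) should be importable once the requirements are installed. A sketch of the equivalent direct command (assuming pip is run from the repository root, which is how the relative path is meant to resolve):

    # what the final "-e ." requirements line expands to, run from the checkout root
    pip install -e .
    # after this, local source edits take effect without reinstalling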