From f4004f660e5daba721426cfcd3fe95318fd10bc3 Mon Sep 17 00:00:00 2001 From: tildebyte <337875+tildebyte@users.noreply.github.com> Date: Sun, 4 Sep 2022 19:43:04 -0400 Subject: [PATCH] TOIL(requirements): Split requirements to per-platform (#355) * toil(reqs): split requirements to per-platform Signed-off-by: Ben Alkov * toil(reqs): fix for Win and Lin... ...allow pip to resolve latest torch, numpy Signed-off-by: Ben Alkov * toil(install): update reqs in Win install notebook Signed-off-by: Ben Alkov Signed-off-by: Ben Alkov --- Stable-Diffusion-local-Windows.ipynb | 16 ++++++++---- requirements-lin.txt | 32 ++++++++++++++++++++++++ requirements.txt => requirements-mac.txt | 0 requirements-win.txt | 32 ++++++++++++++++++++++++ 4 files changed, 75 insertions(+), 5 deletions(-) create mode 100644 requirements-lin.txt rename requirements.txt => requirements-mac.txt (100%) create mode 100644 requirements-win.txt diff --git a/Stable-Diffusion-local-Windows.ipynb b/Stable-Diffusion-local-Windows.ipynb index f4cea1503d..1c5e90dcad 100644 --- a/Stable-Diffusion-local-Windows.ipynb +++ b/Stable-Diffusion-local-Windows.ipynb @@ -65,25 +65,31 @@ "imageio-ffmpeg==0.4.2\n", "imageio==2.9.0\n", "kornia==0.6.0\n", + "# pip will resolve the version which matches torch\n", + "numpy\n", "omegaconf==2.1.1\n", "opencv-python==4.6.0.66\n", "pillow==9.2.0\n", + "pip>=22\n", "pudb==2019.2\n", "pytorch-lightning==1.4.2\n", "streamlit==1.12.0\n", - "# Regular \"taming-transformers\" doesn't seem to work\n", + "# \"CompVis/taming-transformers\" doesn't work\n", + "# ldm\\models\\autoencoder.py\", line 6, in \n", + "# from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer\n", + "# ModuleNotFoundError\n", "taming-transformers-rom1504==0.0.6\n", "test-tube>=0.7.5\n", "torch-fidelity==0.3.0\n", "torchmetrics==0.6.0\n", - "torchvision==0.12.0\n", "transformers==4.19.2\n", "git+https://github.com/openai/CLIP.git@main#egg=clip\n", 
"git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion\n", "# No CUDA in PyPi builds\n", - "torch@https://download.pytorch.org/whl/cu113/torch-1.11.0%2Bcu113-cp310-cp310-win_amd64.whl\n", - "# No MKL in PyPi builds (faster, more robust than OpenBLAS)\n", - "numpy@https://download.lfd.uci.edu/pythonlibs/archived/numpy-1.22.4+mkl-cp310-cp310-win_amd64.whl\n", + "--extra-index-url https://download.pytorch.org/whl/cu113 --trusted-host download.pytorch.org\n", + "torch==1.11.0\n", + "# Same as numpy - let pip do its thing\n", + "torchvision\n", "-e .\n" ] }, diff --git a/requirements-lin.txt b/requirements-lin.txt new file mode 100644 index 0000000000..9c3d1e7451 --- /dev/null +++ b/requirements-lin.txt @@ -0,0 +1,32 @@ +albumentations==0.4.3 +einops==0.3.0 +huggingface-hub==0.8.1 +imageio-ffmpeg==0.4.2 +imageio==2.9.0 +kornia==0.6.0 +# pip will resolve the version which matches torch +numpy +omegaconf==2.1.1 +opencv-python==4.6.0.66 +pillow==9.2.0 +pip>=22 +pudb==2019.2 +pytorch-lightning==1.4.2 +streamlit==1.12.0 +# "CompVis/taming-transformers" doesn't work +# ldm\models\autoencoder.py", line 6, in +# from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer +# ModuleNotFoundError +taming-transformers-rom1504==0.0.6 +test-tube>=0.7.5 +torch-fidelity==0.3.0 +torchmetrics==0.6.0 +transformers==4.19.2 +git+https://github.com/openai/CLIP.git@main#egg=clip +git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion +# No CUDA in PyPI builds +--extra-index-url https://download.pytorch.org/whl/cu113 --trusted-host download.pytorch.org +torch==1.11.0 +# Same as numpy - let pip do its thing +torchvision +-e . 
diff --git a/requirements.txt b/requirements-mac.txt similarity index 100% rename from requirements.txt rename to requirements-mac.txt diff --git a/requirements-win.txt b/requirements-win.txt new file mode 100644 index 0000000000..9c3d1e7451 --- /dev/null +++ b/requirements-win.txt @@ -0,0 +1,32 @@ +albumentations==0.4.3 +einops==0.3.0 +huggingface-hub==0.8.1 +imageio-ffmpeg==0.4.2 +imageio==2.9.0 +kornia==0.6.0 +# pip will resolve the version which matches torch +numpy +omegaconf==2.1.1 +opencv-python==4.6.0.66 +pillow==9.2.0 +pip>=22 +pudb==2019.2 +pytorch-lightning==1.4.2 +streamlit==1.12.0 +# "CompVis/taming-transformers" doesn't work +# ldm\models\autoencoder.py", line 6, in +# from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer +# ModuleNotFoundError +taming-transformers-rom1504==0.0.6 +test-tube>=0.7.5 +torch-fidelity==0.3.0 +torchmetrics==0.6.0 +transformers==4.19.2 +git+https://github.com/openai/CLIP.git@main#egg=clip +git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion +# No CUDA in PyPI builds +--extra-index-url https://download.pytorch.org/whl/cu113 --trusted-host download.pytorch.org +torch==1.11.0 +# Same as numpy - let pip do its thing +torchvision +-e .