From eb642653cb4e45acce0a433cb68815dc62db4f07 Mon Sep 17 00:00:00 2001 From: Alexandre Macabies Date: Sun, 30 Jul 2023 17:05:10 +0200 Subject: [PATCH] Add Nix Flake for development, which uses Python virtualenv. --- flake.lock | 25 +++++++++++++++++ flake.nix | 81 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 106 insertions(+) create mode 100644 flake.lock create mode 100644 flake.nix diff --git a/flake.lock b/flake.lock new file mode 100644 index 0000000000..536c8259b1 --- /dev/null +++ b/flake.lock @@ -0,0 +1,25 @@ +{ + "nodes": { + "nixpkgs": { + "locked": { + "lastModified": 1690630721, + "narHash": "sha256-Y04onHyBQT4Erfr2fc82dbJTfXGYrf4V0ysLUYnPOP8=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "d2b52322f35597c62abf56de91b0236746b2a03d", + "type": "github" + }, + "original": { + "id": "nixpkgs", + "type": "indirect" + } + }, + "root": { + "inputs": { + "nixpkgs": "nixpkgs" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 0000000000..1f35c02c6e --- /dev/null +++ b/flake.nix @@ -0,0 +1,81 @@ +# Important note: this flake does not attempt to create a fully isolated, 'pure' +# Python environment for InvokeAI. Instead, it depends on local invocations of +# virtualenv/pip to install the required (binary) packages, most importantly the +# prebuilt binary pytorch packages with CUDA support. +# ML Python packages with CUDA support, like pytorch, are notoriously expensive +# to compile so it's purposefully not what this flake does. + +{ + description = "An (impure) flake to develop on InvokeAI."; + + outputs = { self, nixpkgs }: + let + system = "x86_64-linux"; + pkgs = import nixpkgs { + inherit system; + config.allowUnfree = true; + }; + + python = pkgs.python310; + + mkShell = { dir, install }: + let + setupScript = pkgs.writeScript "setup-invokeai" '' + # This must be sourced using 'source', not executed. 
+ ${python}/bin/python -m venv ${dir} + ${dir}/bin/python -m pip install ${install} + # ${dir}/bin/python -c 'import torch; assert(torch.cuda.is_available())' + source ${dir}/bin/activate + ''; + in + pkgs.mkShell rec { + buildInputs = with pkgs; [ + # Backend: graphics, CUDA. + cudaPackages.cudnn + cudaPackages.cuda_nvrtc + cudatoolkit + freeglut + glib + gperf + procps + libGL + libGLU + linuxPackages.nvidia_x11 + python + stdenv.cc + stdenv.cc.cc.lib + xorg.libX11 + xorg.libXext + xorg.libXi + xorg.libXmu + xorg.libXrandr + xorg.libXv + zlib + + # Pre-commit hooks. + black + + # Frontend. + yarn + nodejs + ]; + LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath buildInputs; + CUDA_PATH = pkgs.cudatoolkit; + EXTRA_LDFLAGS = "-L${pkgs.linuxPackages.nvidia_x11}/lib"; + shellHook = '' + if [[ -f "${dir}/bin/activate" ]]; then + source "${dir}/bin/activate" + echo "Using Python: $(which python)" + else + echo "Use 'source ${setupScript}' to set up the environment." + fi + ''; + }; + in + { + devShells.${system} = rec { + develop = mkShell { dir = "venv"; install = "-e '.[xformers]' --extra-index-url https://download.pytorch.org/whl/cu118"; }; + default = develop; + }; + }; +}