Mirror of https://github.com/invoke-ai/InvokeAI, synced 2024-08-30 20:32:17 +00:00.
feat(installer): remove extra GPU options

- Remove `CUDA_AND_DML`. This was for ONNX, which we have since removed.
- Remove `AUTODETECT`. This option causes problems for Windows users, as it falls back on the default PyPI index, resulting in a non-CUDA torch being installed.
- Add more explicit settings for the extra index URL, based on the torch website.
- Fix a bug where `xformers` wasn't installed on Linux and/or Windows when autodetect was selected.
Parent: 9fdfd4267c
Commit: 52b58b4a80
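For context, the (extra_index_url, optional_modules) tuple that get_torch_source() returns is what steers the installer's pip invocation. A minimal sketch of how such a tuple turns into pip arguments; build_pip_command and the "invokeai" package spec here are illustrative stand-ins, not the installer's actual code:

    # Illustrative only: how (extra_index_url, optional_modules) could become
    # pip arguments. Helper name and package spec are hypothetical.
    from typing import List, Optional

    def build_pip_command(url: Optional[str], optional_modules: Optional[str]) -> List[str]:
        package = f"invokeai{optional_modules or ''}"  # e.g. "invokeai[xformers,onnx-cuda]"
        cmd = ["pip", "install", package]
        if url is not None:
            cmd += ["--extra-index-url", url]  # torch wheels outside the default PyPI index
        return cmd

    # Windows + CUDA after this change:
    print(build_pip_command("https://download.pytorch.org/whl/cu121", "[xformers,onnx-cuda]"))
    # ['pip', 'install', 'invokeai[xformers,onnx-cuda]',
    #  '--extra-index-url', 'https://download.pytorch.org/whl/cu121']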
@@ -404,22 +404,29 @@ def get_torch_source() -> Tuple[str | None, str | None]:
     # device can be one of: "cuda", "rocm", "cpu", "cuda_and_dml, autodetect"
     device = select_gpu()
 
+    # The correct extra index URLs for torch are inconsistent, see https://pytorch.org/get-started/locally/#start-locally
+
     url = None
-    optional_modules = "[onnx]"
+    optional_modules: str | None = None
     if OS == "Linux":
         if device.value == "rocm":
             url = "https://download.pytorch.org/whl/rocm5.6"
         elif device.value == "cpu":
             url = "https://download.pytorch.org/whl/cpu"
+        elif device.value == "cuda":
+            # CUDA uses the default PyPi index
+            optional_modules = "[xformers,onnx-cuda]"
     elif OS == "Windows":
         if device.value == "cuda":
             url = "https://download.pytorch.org/whl/cu121"
             optional_modules = "[xformers,onnx-cuda]"
-        if device.value == "cuda_and_dml":
-            url = "https://download.pytorch.org/whl/cu121"
-            optional_modules = "[xformers,onnx-directml]"
+        elif device.value == "cpu":
+            # CPU uses the default PyPi index, no optional modules
+            pass
+    elif OS == "Darwin":
+        # macOS uses the default PyPi index, no optional modules
+        pass
 
-    # in all other cases, Torch wheels should be coming from PyPi as of Torch 1.13
+    # Fall back to defaults
 
     return (url, optional_modules)
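The new branching reduces to a small (OS, device) -> (extra index URL, extras) table. A self-contained restatement of the mapping in the hunk above, for illustration only; note that Linux + CUDA now gets "[xformers,onnx-cuda]" where the old code left it at "[onnx]", which is the xformers fix the commit message mentions:

    # Restates the post-change mapping from the diff above; not installer code.
    from typing import Dict, Optional, Tuple

    TORCH_SOURCES: Dict[Tuple[str, str], Tuple[Optional[str], Optional[str]]] = {
        ("Linux", "rocm"): ("https://download.pytorch.org/whl/rocm5.6", None),
        ("Linux", "cpu"): ("https://download.pytorch.org/whl/cpu", None),
        ("Linux", "cuda"): (None, "[xformers,onnx-cuda]"),  # default PyPI index
        ("Windows", "cuda"): ("https://download.pytorch.org/whl/cu121", "[xformers,onnx-cuda]"),
        ("Windows", "cpu"): (None, None),  # default PyPI index, no extras
        ("Darwin", "cpu"): (None, None),   # macOS: default PyPI index, no extras
    }

    def torch_source(os_name: str, device: str) -> Tuple[Optional[str], Optional[str]]:
        # Any unlisted combination falls back to defaults, as in the diff.
        return TORCH_SOURCES.get((os_name, device), (None, None))

    assert torch_source("Linux", "cuda") == (None, "[xformers,onnx-cuda]")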
@@ -207,10 +207,8 @@ def dest_path(dest: Optional[str | Path] = None) -> Path | None:
 
 class GpuType(Enum):
     CUDA = "cuda"
-    CUDA_AND_DML = "cuda_and_dml"
     ROCM = "rocm"
     CPU = "cpu"
-    AUTODETECT = "autodetect"
 
 
 def select_gpu() -> GpuType:
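With those two members gone, the enum is down to three values, and the device.value string comparisons in get_torch_source() match them directly. A standalone check of that behavior:

    # The slimmed enum after this change, shown standalone for illustration.
    from enum import Enum

    class GpuType(Enum):
        CUDA = "cuda"
        ROCM = "rocm"
        CPU = "cpu"

    assert GpuType("rocm") is GpuType.ROCM  # by-value lookup still works
    assert GpuType.CUDA.value == "cuda"     # what device.value is compared against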
@@ -226,10 +224,6 @@ def select_gpu() -> GpuType:
         "an [gold1 b]NVIDIA[/] GPU (using CUDA™)",
         GpuType.CUDA,
     )
-    nvidia_with_dml = (
-        "an [gold1 b]NVIDIA[/] GPU (using CUDA™, and DirectML™ for ONNX) -- ALPHA",
-        GpuType.CUDA_AND_DML,
-    )
     amd = (
         "an [gold1 b]AMD[/] GPU (using ROCm™)",
         GpuType.ROCM,
@@ -238,27 +232,19 @@ def select_gpu() -> GpuType:
         "Do not install any GPU support, use CPU for generation (slow)",
         GpuType.CPU,
     )
-    autodetect = (
-        "I'm not sure what to choose",
-        GpuType.AUTODETECT,
-    )
 
     options = []
     if OS == "Windows":
-        options = [nvidia, nvidia_with_dml, cpu]
+        options = [nvidia, cpu]
     if OS == "Linux":
         options = [nvidia, amd, cpu]
     elif OS == "Darwin":
         options = [cpu]
-        # future CoreML?
 
     if len(options) == 1:
         print(f'Your platform [gold1]{OS}-{ARCH}[/] only supports the "{options[0][1]}" driver. Proceeding with that.')
         return options[0][1]
 
-    # "I don't know" is always added the last option
-    options.append(autodetect)  # type: ignore
-
     options = {str(i): opt for i, opt in enumerate(options, 1)}
 
     console.rule(":space_invader: GPU (Graphics Card) selection :space_invader:")
@@ -292,11 +278,6 @@ def select_gpu() -> GpuType:
         ),
     )
 
-    if options[choice][1] is GpuType.AUTODETECT:
-        console.print(
-            "No problem. We will install CUDA support first :crossed_fingers: If Invoke does not detect a GPU, please re-run the installer and select one of the other GPU types."
-        )
-
     return options[choice][1]
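After the removals, select_gpu() reduces to: collect the (label, GpuType) pairs valid for the OS, auto-select when only one remains, and otherwise number the choices for the prompt. A simplified sketch of that flow, with plain strings in place of the installer's rich console markup:

    # Simplified sketch of the post-change selection flow; the real code
    # prompts via rich and prints markup labels.
    from enum import Enum

    class GpuType(Enum):
        CUDA = "cuda"
        ROCM = "rocm"
        CPU = "cpu"

    def options_for(os_name: str) -> list:
        nvidia = ("an NVIDIA GPU (using CUDA)", GpuType.CUDA)
        amd = ("an AMD GPU (using ROCm)", GpuType.ROCM)
        cpu = ("no GPU support, use CPU for generation (slow)", GpuType.CPU)
        if os_name == "Windows":
            return [nvidia, cpu]  # CUDA_AND_DML and AUTODETECT are gone
        if os_name == "Linux":
            return [nvidia, amd, cpu]
        return [cpu]  # Darwin

    options = options_for("Linux")
    if len(options) == 1:
        selected = options[0][1]  # single-option platforms skip the prompt
    else:
        # As in the diff: number the remaining choices starting at 1.
        menu = {str(i): opt for i, opt in enumerate(options, 1)}
        selected = menu["1"][1]  # stand-in for the user's prompt choice
    print(selected)  # GpuType.CUDA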