Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
Update probe to always use cpu for loading models
This commit is contained in:
parent 59b4a23479
commit 0b238b1ece
@@ -323,7 +323,7 @@ class ModelProbe(object):
     with SilenceWarnings():
         if model_path.suffix.endswith((".ckpt", ".pt", ".pth", ".bin")):
             cls._scan_model(model_path.name, model_path)
-            model = torch.load(model_path)
+            model = torch.load(model_path, map_location="cpu")
             assert isinstance(model, dict)
             return model
         else:
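For context, torch.load restores tensors to the device they were saved from unless map_location is given; passing map_location="cpu" forces everything onto the CPU, so probing a checkpoint never touches (or requires) a GPU. A minimal sketch of the same pattern, with a placeholder checkpoint path that is not part of the actual probe code:

    import torch

    # Hypothetical path, used only for illustration.
    checkpoint_path = "model.ckpt"

    # Without map_location, a checkpoint saved from "cuda:0" would be loaded
    # back onto the GPU (or fail on a CPU-only host). Mapping to "cpu" keeps
    # the load device-independent, which is all a probe needs.
    state = torch.load(checkpoint_path, map_location="cpu")

    # Inspecting keys and tensor shapes works fine on CPU.
    assert isinstance(state, dict)
    print(list(state.keys())[:5])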