diff --git a/invokeai/backend/model_management/model_probe.py b/invokeai/backend/model_management/model_probe.py
index 2828cc7ab1..938868e714 100644
--- a/invokeai/backend/model_management/model_probe.py
+++ b/invokeai/backend/model_management/model_probe.py
@@ -78,7 +78,6 @@ class ModelProbe(object):
                 format_type = 'diffusers' if model_path.is_dir() else 'checkpoint'
             else:
                 format_type = 'diffusers' if isinstance(model,(ConfigMixin,ModelMixin)) else 'checkpoint'
-
         model_info = None
         try:
             model_type = cls.get_model_type_from_folder(model_path, model) \
@@ -105,7 +104,7 @@ class ModelProbe(object):
                 ) else 512,
             )
         except Exception:
-            return None
+            raise
 
         return model_info
 
diff --git a/invokeai/frontend/install/model_install.py b/invokeai/frontend/install/model_install.py
index 33ef114912..f3ebcb22be 100644
--- a/invokeai/frontend/install/model_install.py
+++ b/invokeai/frontend/install/model_install.py
@@ -678,9 +678,8 @@ def select_and_download_models(opt: Namespace):
 
     # this is where the TUI is called
     else:
-        # needed because the torch library is loaded, even though we don't use it
-        # currently commented out because it has started generating errors (?)
-        # torch.multiprocessing.set_start_method("spawn")
+        # needed to support the probe() method running under a subprocess
+        torch.multiprocessing.set_start_method("spawn")
 
         # the third argument is needed in the Windows 11 environment in
         # order to launch and resize a console window running this program