Address change requests in first round of PR reviews.

Pending:
- Move model install calls into the model manager and create passthrus in invocation_context.
- Consider splitting load_model_from_url() into a call that gets the path and a call that loads the path.
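To make the second pending item concrete, here is a minimal sketch of what splitting load_model_from_url() into a path-resolution step and a load step could look like. All names below (get_model_path_from_url, load_model_at_path, models_dir) are hypothetical illustrations, not the actual InvokeAI API:

    from pathlib import Path
    from urllib.parse import urlparse
    from urllib.request import urlretrieve

    def get_model_path_from_url(url: str, models_dir: Path) -> Path:
        # Resolve a URL to a local file, downloading only on first use.
        # (Stand-in for InvokeAI's downloader; the name is hypothetical.)
        models_dir.mkdir(parents=True, exist_ok=True)
        local_path = models_dir / Path(urlparse(url).path).name
        if not local_path.exists():
            urlretrieve(url, local_path)
        return local_path

    def load_model_at_path(path: Path):
        # Load a model that is already on disk; the real loader would
        # dispatch on model type. torch.load is only a placeholder here.
        import torch
        return torch.load(path, map_location="cpu")

    # The single load_model_from_url(url) call then decomposes into:
    #   path = get_model_path_from_url(url, Path("~/invokeai/models").expanduser())
    #   model = load_model_at_path(path)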
This commit is contained in:
@@ -19,6 +19,7 @@ from invokeai.backend.model_manager.load.model_cache.model_cache_base import Mod
 from invokeai.backend.model_manager.load.model_util import calc_model_size_by_fs
 from invokeai.backend.model_manager.load.optimizations import skip_torch_weight_init
 from invokeai.backend.util.devices import TorchDevice
+from invokeai.backend.util.util import slugify


 # TO DO: The loader is not thread safe!

@@ -84,7 +85,7 @@ class ModelLoader(ModelLoaderBase):
         except IndexError:
             pass

-        cache_path: Path = self._convert_cache.cache_path(config.key)
+        cache_path: Path = self._convert_cache.cache_path(slugify(model_path))
         if self._needs_conversion(config, model_path, cache_path):
             loaded_model = self._do_convert(config, model_path, cache_path, submodel_type)
         else:
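The second hunk keys the conversion cache by a slug of the model's on-disk path instead of the opaque config.key, which is why the slugify import is added in the first hunk. For readers unfamiliar with slugify (imported above from invokeai.backend.util.util), here is a rough stand-in showing the kind of mapping it performs; this re-implementation is an assumption for illustration, not InvokeAI's actual code:

    import re
    from pathlib import Path

    def slugify(value) -> str:
        # Illustrative stand-in: collapse anything that is not a word
        # character, dot, or hyphen into underscores so that a full path
        # can serve as a single filesystem-safe cache directory name.
        return re.sub(r"[^\w.-]+", "_", str(value)).strip("_")

    print(slugify(Path("/models/sdxl/base/model.safetensors")))
    # -> models_sdxl_base_model.safetensors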