From e3a70e598e0c566f2f43bc67588edc0d87c78738 Mon Sep 17 00:00:00 2001 From: psychedelicious <4822129+psychedelicious@users.noreply.github.com> Date: Mon, 3 Jun 2024 08:40:29 +1000 Subject: [PATCH] docs(app): simplify docstring in invocation_context --- invokeai/app/services/shared/invocation_context.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/invokeai/app/services/shared/invocation_context.py b/invokeai/app/services/shared/invocation_context.py index 08ca207118..27a29f6646 100644 --- a/invokeai/app/services/shared/invocation_context.py +++ b/invokeai/app/services/shared/invocation_context.py @@ -465,12 +465,10 @@ class ModelsInterface(InvocationContextInterface): """ Download, cache, and load the model file located at the indicated URL. - This will check the model download cache for the model designated - by the provided URL and download it if needed using download_and_cache_ckpt(). - It will then load the model into the RAM cache. If the optional loader - argument is provided, the loader will be invoked to load the model into - memory. Otherwise the method will call safetensors.torch.load_file() or - torch.load() as appropriate to the file suffix. + If the model is already downloaded, it will be loaded from the cache. + + If a loader callable is provided, it will be invoked to load the model. Otherwise, + `safetensors.torch.load_file()` or `torch.load()` will be called to load the model. Be aware that the LoadedModel object will have a `config` attribute of None.