From ba1284968528896896cc438b9e5ca1655ed2dafc Mon Sep 17 00:00:00 2001
From: psychedelicious <4822129+psychedelicious@users.noreply.github.com>
Date: Mon, 17 Jul 2023 17:16:55 +1000
Subject: [PATCH] fix(nodes): fix some model load events not emitting

Missed adding the `context` arg to them initially
---
 invokeai/app/invocations/compel.py   | 5 +++--
 invokeai/app/invocations/generate.py | 6 +++---
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py
index a5a9701149..c8a9bf4464 100644
--- a/invokeai/app/invocations/compel.py
+++ b/invokeai/app/invocations/compel.py
@@ -57,10 +57,10 @@ class CompelInvocation(BaseInvocation):
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> CompelOutput:
         tokenizer_info = context.services.model_manager.get_model(
-            **self.clip.tokenizer.dict(),
+            **self.clip.tokenizer.dict(), context=context,
         )
         text_encoder_info = context.services.model_manager.get_model(
-            **self.clip.text_encoder.dict(),
+            **self.clip.text_encoder.dict(), context=context,
         )

         def _lora_loader():
@@ -82,6 +82,7 @@ class CompelInvocation(BaseInvocation):
                         model_name=name,
                         base_model=self.clip.text_encoder.base_model,
                         model_type=ModelType.TextualInversion,
+                        context=context,
                     ).context.model
                 )
             except ModelNotFoundException:
diff --git a/invokeai/app/invocations/generate.py b/invokeai/app/invocations/generate.py
index 6cdb83effc..b8e9ec2038 100644
--- a/invokeai/app/invocations/generate.py
+++ b/invokeai/app/invocations/generate.py
@@ -157,13 +157,13 @@ class InpaintInvocation(BaseInvocation):
         def _lora_loader():
             for lora in self.unet.loras:
                 lora_info = context.services.model_manager.get_model(
-                    **lora.dict(exclude={"weight"}))
+                    **lora.dict(exclude={"weight"}), context=context,)
                 yield (lora_info.context.model, lora.weight)
                 del lora_info
             return

-        unet_info = context.services.model_manager.get_model(**self.unet.unet.dict())
-        vae_info = context.services.model_manager.get_model(**self.vae.vae.dict())
+        unet_info = context.services.model_manager.get_model(**self.unet.unet.dict(), context=context,)
+        vae_info = context.services.model_manager.get_model(**self.vae.vae.dict(), context=context,)

         with vae_info as vae,\
             ModelPatcher.apply_lora_unet(unet_info.context.model, _lora_loader()),\