fix: Loras not working correctly with Inpainting

blessedcoolant 2023-07-13 22:59:38 +12:00
parent 16f53228c2
commit 430b9c291f


@@ -154,18 +154,20 @@ class InpaintInvocation(BaseInvocation):
     @contextmanager
     def load_model_old_way(self, context, scheduler):
+        def _lora_loader():
+            for lora in self.unet.loras:
+                lora_info = context.services.model_manager.get_model(
+                    **lora.dict(exclude={"weight"}))
+                yield (lora_info.context.model, lora.weight)
+                del lora_info
+            return
+
         unet_info = context.services.model_manager.get_model(**self.unet.unet.dict())
         vae_info = context.services.model_manager.get_model(**self.vae.vae.dict())
 
-        #unet = unet_info.context.model
-        #vae = vae_info.context.model
-
-        with ExitStack() as stack:
-            loras = [(stack.enter_context(context.services.model_manager.get_model(**lora.dict(exclude={"weight"}))), lora.weight) for lora in self.unet.loras]
-
-            with vae_info as vae,\
-                 unet_info as unet,\
-                 ModelPatcher.apply_lora_unet(unet, loras):
-
-                device = context.services.model_manager.mgr.cache.execution_device
-                dtype = context.services.model_manager.mgr.cache.precision
+        with vae_info as vae,\
+            ModelPatcher.apply_lora_unet(unet_info.context.model, _lora_loader()),\
+            unet_info as unet:
+
+            device = context.services.model_manager.mgr.cache.execution_device
+            dtype = context.services.model_manager.mgr.cache.precision
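For reference, the change drops the ExitStack list comprehension (which loaded every LoRA up front and held it open) in favour of a generator, _lora_loader, that ModelPatcher.apply_lora_unet consumes against the raw unet_info.context.model before the UNet context manager is entered. Below is a minimal, runnable sketch of that pattern; FakeLora, load_lora, and apply_lora are hypothetical stand-ins for illustration, not InvokeAI APIs.

# Minimal, self-contained sketch of the lazy LoRA-loading pattern this fix
# switches to. All names below (FakeLora, load_lora, apply_lora) are
# hypothetical stand-ins, not InvokeAI APIs.
from contextlib import contextmanager
from dataclasses import dataclass


@dataclass
class FakeLora:
    # Stand-in for a LoRA reference: a model name plus its blend weight.
    name: str
    weight: float


def load_lora(name):
    # Stand-in for context.services.model_manager.get_model(...).
    return {"name": name}


def _lora_loader(loras):
    # Yield one (model, weight) pair at a time, so each LoRA is held only
    # while the patcher consumes it, rather than keeping every LoRA open
    # in an ExitStack for the whole invocation.
    for lora in loras:
        model = load_lora(lora.name)
        yield model, lora.weight
        del model


@contextmanager
def apply_lora(unet, loader):
    # Stand-in for ModelPatcher.apply_lora_unet: drain the generator,
    # "patch" the model, then undo the patch on exit.
    patched = [(model, weight) for model, weight in loader]
    try:
        yield unet
    finally:
        patched.clear()


if __name__ == "__main__":
    loras = [FakeLora("style", 0.8), FakeLora("detail", 0.5)]
    unet = object()
    # As in the diff above, the patch context wraps the raw model before
    # the UNet context manager itself is entered.
    with apply_lora(unet, _lora_loader(loras)) as patched_unet:
        print("inpainting with", patched_unet)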