Patch LoRA on device when model is already on device.

This commit is contained in:
Ryan Dick
2023-10-31 15:39:54 -04:00
committed by Kent Keirsey
parent 545c811bf1
commit 379d68f595
3 changed files with 26 additions and 9 deletions

View File

@@ -710,9 +710,10 @@ class DenoiseLatentsInvocation(BaseInvocation):
         )
         with (
             ExitStack() as exit_stack,
-            ModelPatcher.apply_lora_unet(unet_info.context.model, _lora_loader()),
             set_seamless(unet_info.context.model, self.unet.seamless_axes),
             unet_info as unet,
+            # Apply the LoRA after unet has been moved to its target device for faster patching.
+            ModelPatcher.apply_lora_unet(unet, _lora_loader()),
         ):
             latents = latents.to(device=unet.device, dtype=unet.dtype)
             if noise is not None: