fix(backend): apply clip skip after lora

This handles LoRAs that attempt to modify layers skipped by CLIP Skip.
psychedelicious 2023-11-14 07:42:10 +11:00
parent 8883ecb2bf
commit e8b83fecff


@@ -112,10 +112,11 @@ class CompelInvocation(BaseInvocation):
                 tokenizer,
                 ti_manager,
             ),
-            ModelPatcher.apply_clip_skip(text_encoder_info.context.model, self.clip.skipped_layers),
             text_encoder_info as text_encoder,
             # Apply the LoRA after text_encoder has been moved to its target device for faster patching.
             ModelPatcher.apply_lora_text_encoder(text_encoder, _lora_loader()),
+            # Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
+            ModelPatcher.apply_clip_skip(text_encoder_info.context.model, self.clip.skipped_layers),
         ):
             compel = Compel(
                 tokenizer=tokenizer,
@@ -234,10 +235,11 @@ class SDXLPromptInvocationBase:
                 tokenizer,
                 ti_manager,
             ),
-            ModelPatcher.apply_clip_skip(text_encoder_info.context.model, clip_field.skipped_layers),
             text_encoder_info as text_encoder,
             # Apply the LoRA after text_encoder has been moved to its target device for faster patching.
             ModelPatcher.apply_lora(text_encoder, _lora_loader(), lora_prefix),
+            # Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
+            ModelPatcher.apply_clip_skip(text_encoder_info.context.model, clip_field.skipped_layers),
         ):
             compel = Compel(
                 tokenizer=tokenizer,
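
For context on why the ordering above matters, here is a minimal, self-contained sketch. It is not the project's actual ModelPatcher code; it assumes, as the added comment suggests, that CLIP Skip works by temporarily removing the trailing encoder layers, so a LoRA that targets one of those layers can no longer resolve it if the skip is applied first. The TinyEncoder class and both context managers below are hypothetical stand-ins.

from contextlib import contextmanager

import torch
from torch import nn


class TinyEncoder(nn.Module):
    """Stand-in for a CLIP text encoder: just a stack of linear layers."""

    def __init__(self, n_layers: int = 4) -> None:
        super().__init__()
        self.layers = nn.ModuleList(nn.Linear(8, 8) for _ in range(n_layers))


@contextmanager
def apply_clip_skip(encoder: TinyEncoder, skipped_layers: int):
    """Temporarily pop the last `skipped_layers` layers (hypothetical clip-skip)."""
    removed = [encoder.layers.pop(-1) for _ in range(skipped_layers)]
    try:
        yield
    finally:
        for layer in reversed(removed):
            encoder.layers.append(layer)


@contextmanager
def apply_lora(encoder: TinyEncoder, lora_keys: list[str]):
    """Patch the named submodules; fails if a targeted layer has been removed."""
    originals = {}
    try:
        for key in lora_keys:
            module = encoder.get_submodule(key)  # raises AttributeError if the layer is gone
            originals[key] = module.weight.detach().clone()
            module.weight.data += 0.01  # stand-in for adding the LoRA delta
        yield
    finally:
        for key, weight in originals.items():
            encoder.get_submodule(key).weight.data = weight


encoder = TinyEncoder()
lora_keys = ["layers.3"]  # a LoRA that touches the last layer

# Old order: clip skip first, so "layers.3" is gone when the LoRA tries to patch it.
try:
    with apply_clip_skip(encoder, 1), apply_lora(encoder, lora_keys):
        pass
except AttributeError as err:
    print("old order fails:", err)

# New order (as in this commit): LoRA first, then clip skip; both patches apply cleanly.
with apply_lora(encoder, lora_keys), apply_clip_skip(encoder, 1):
    print("new order works")

With the LoRA applied first, the skipped layers still exist when the LoRA weights are resolved; the clip-skip context then removes them only for the duration of the prompt encoding, and both patches unwind in reverse order on exit.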