Rename ConcatenatedLoRALayer to MergedLayerPatch. And other minor cleanup.
@@ -98,7 +98,7 @@ class FluxTextEncoderInvocation(BaseInvocation):
             raise ValueError(f"Unsupported model format: {t5_encoder_config.format}")

         # Apply LoRA models to the T5 encoder.
-        # Note: We apply the LoRA after the transformer has been moved to its target device for faster patching.
+        # Note: We apply the LoRA after the encoder has been moved to its target device for faster patching.
         exit_stack.enter_context(
             LayerPatcher.apply_smart_model_patches(
                 model=t5_text_encoder,
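The note in this hunk explains the ordering: patches are applied only after the T5 encoder has been moved to its target device, and the patch context is pushed onto an exit stack so the original weights are restored when the invocation finishes. Below is a minimal sketch of that ExitStack pattern; apply_patches and run_with_patches are hypothetical stand-ins for illustration, not InvokeAI's LayerPatcher API.

# Minimal sketch of the ExitStack-based patching pattern shown above.
# The helper names here are hypothetical, not InvokeAI's actual API.
from contextlib import ExitStack, contextmanager

import torch

@contextmanager
def apply_patches(model: torch.nn.Module, scale: float):
    # Save the original weights, apply a placeholder delta, and restore on exit.
    originals = {k: v.detach().clone() for k, v in model.state_dict().items()}
    try:
        with torch.no_grad():
            for p in model.parameters():
                p.add_(scale * torch.zeros_like(p))  # placeholder for a real LoRA delta
        yield model
    finally:
        model.load_state_dict(originals)

def run_with_patches(model: torch.nn.Module, x: torch.Tensor) -> torch.Tensor:
    with ExitStack() as exit_stack:
        # Patch only after the model is already on its target device, mirroring the note above.
        exit_stack.enter_context(apply_patches(model, scale=1.0))
        return model(x)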
@@ -13,8 +13,8 @@ class Range:
     end: int


-class ConcatenatedLoRALayer(BaseLayerPatch):
-    """A patch layer that is composed of multiple sub-layers concatenated together.
+class MergedLayerPatch(BaseLayerPatch):
+    """A patch layer that is composed of multiple sub-layers merged together.

     This class was created to handle a special case with FLUX LoRA models. In the BFL FLUX model format, the attention
     Q, K, V matrices are concatenated along the first dimension. In the diffusers LoRA format, the Q, K, V matrices are
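The docstring describes the mismatch this class resolves: BFL FLUX fuses the attention Q, K, V matrices along the first dimension, while diffusers-format LoRAs provide separate Q, K, V deltas. The sketch below shows the underlying idea of scattering per-matrix deltas into row ranges of one fused weight; merge_qkv_deltas and the sizes are made up for illustration and are not the real MergedLayerPatch implementation.

# Illustration of the idea behind MergedLayerPatch: three per-matrix LoRA deltas
# (Q, K, V) are written into row ranges of a single fused QKV weight.
import torch

def merge_qkv_deltas(deltas: list[torch.Tensor], ranges: list[tuple[int, int]], fused_rows: int) -> torch.Tensor:
    cols = deltas[0].shape[1]
    fused = torch.zeros(fused_rows, cols)
    for delta, (start, end) in zip(deltas, ranges):
        fused[start:end] += delta  # each sub-layer only touches its own row range
    return fused

# Example with made-up sizes: Q, K, V deltas of 16/16/32 rows fill a fused 64-row weight.
q, k, v = torch.randn(16, 64), torch.randn(16, 64), torch.randn(32, 64)
fused_delta = merge_qkv_deltas([q, k, v], [(0, 16), (16, 32), (32, 64)], fused_rows=64)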
@@ -3,7 +3,7 @@ from typing import Dict
 import torch

 from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
-from invokeai.backend.patches.layers.concatenated_lora_layer import ConcatenatedLoRALayer, Range
+from invokeai.backend.patches.layers.merged_layer_patch import MergedLayerPatch, Range
 from invokeai.backend.patches.layers.utils import any_lora_layer_from_state_dict
 from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_TRANSFORMER_PREFIX
 from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
@@ -113,7 +113,7 @@ def lora_layers_from_flux_diffusers_grouped_state_dict(

             dim_0_offset += src_weight_shape[0]

-        layers[dst_qkv_key] = ConcatenatedLoRALayer(sub_layers, sub_layer_ranges)
+        layers[dst_qkv_key] = MergedLayerPatch(sub_layers, sub_layer_ranges)

     # time_text_embed.timestep_embedder -> time_in.
     add_lora_layer_if_present("time_text_embed.timestep_embedder.linear_1", "time_in.in_layer")
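In the conversion function above, dim_0_offset advances by each source weight's dim-0 size so every Q/K/V sub-layer gets its own slice of the fused output dimension, and the collected sub-layers and ranges are stored under a single QKV key. A small sketch of that offset-accumulation idea, with a hypothetical build_ranges helper and made-up sizes:

# Sketch of deriving row ranges by accumulating a dim-0 offset over the source
# Q/K/V weight shapes, as the loop above does. build_ranges is illustrative only.
def build_ranges(dim_0_sizes: list[int]) -> list[tuple[int, int]]:
    ranges: list[tuple[int, int]] = []
    dim_0_offset = 0
    for size in dim_0_sizes:
        ranges.append((dim_0_offset, dim_0_offset + size))
        dim_0_offset += size
    return ranges

# E.g. three 3072-row weights (a made-up size) cover rows 0-3072, 3072-6144, 6144-9216.
assert build_ranges([3072, 3072, 3072]) == [(0, 3072), (3072, 6144), (6144, 9216)]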
@@ -13,10 +13,10 @@ from invokeai.backend.model_manager.load.model_cache.torch_module_autocast.torch
 )
 from invokeai.backend.patches.layer_patcher import LayerPatcher
 from invokeai.backend.patches.layers.base_layer_patch import BaseLayerPatch
-from invokeai.backend.patches.layers.concatenated_lora_layer import ConcatenatedLoRALayer, Range
 from invokeai.backend.patches.layers.flux_control_lora_layer import FluxControlLoRALayer
 from invokeai.backend.patches.layers.lokr_layer import LoKRLayer
 from invokeai.backend.patches.layers.lora_layer import LoRALayer
+from invokeai.backend.patches.layers.merged_layer_patch import MergedLayerPatch, Range
 from invokeai.backend.util.original_weights_storage import OriginalWeightsStorage
 from tests.backend.model_manager.load.model_cache.torch_module_autocast.custom_modules.test_custom_invoke_linear_8_bit_lt import (
     build_linear_8bit_lt_layer,
@@ -328,7 +328,7 @@ def patch_under_test(request: pytest.FixtureRequest) -> PatchUnderTest:
     elif layer_type == "concatenated_lora":
         sub_layer_out_features = [16, 16, 32]

-        # Create a ConcatenatedLoRA layer.
+        # Create a MergedLayerPatch.
         sub_layers: list[LoRALayer] = []
         sub_layer_ranges: list[Range] = []
         dim_0_offset = 0
@@ -339,10 +339,10 @@ def patch_under_test(request: pytest.FixtureRequest) -> PatchUnderTest:
             sub_layers.append(LoRALayer(up=up, mid=None, down=down, alpha=1.0, bias=bias))
             sub_layer_ranges.append(Range(dim_0_offset, dim_0_offset + out_features))
             dim_0_offset += out_features
-        concatenated_lora_layer = ConcatenatedLoRALayer(sub_layers, sub_layer_ranges)
+        merged_layer_patch = MergedLayerPatch(sub_layers, sub_layer_ranges)

         input = torch.randn(1, in_features)
-        return ([(concatenated_lora_layer, 0.7)], input)
+        return ([(merged_layer_patch, 0.7)], input)
     elif layer_type == "flux_control_lora":
         # Create a FluxControlLoRALayer.
         patched_in_features = 40
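The fixture returns a list of (patch, weight) tuples plus an input tensor, with the MergedLayerPatch weighted at 0.7. As a rough illustration of what such a pair implies, the sketch below scales a stand-in delta by that weight and adds it to a base Linear whose 64 output features match 16 + 16 + 32; apply_weighted_delta, the in_features value, and the random delta are illustrative assumptions, and the real tests exercise this path through LayerPatcher.

# Hedged sketch of applying a (patch, weight) pair like the fixture's return value:
# scale a stand-in merged delta by the weight (0.7) and add it to the base layer.
import torch

def apply_weighted_delta(base: torch.nn.Linear, delta: torch.Tensor, weight: float) -> None:
    with torch.no_grad():
        base.weight.add_(weight * delta)

in_features, out_features = 32, 64               # 64 = 16 + 16 + 32, matching the fixture; 32 is made up
base = torch.nn.Linear(in_features, out_features)
delta = torch.randn(out_features, in_features)   # stand-in for the merged LoRA delta
apply_weighted_delta(base, delta, weight=0.7)
output = base(torch.randn(1, in_features))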