diff --git a/invokeai/backend/stable_diffusion/diffusion/unet_attention_patcher.py b/invokeai/backend/stable_diffusion/diffusion/unet_attention_patcher.py
index e94d78decb..f74359c614 100644
--- a/invokeai/backend/stable_diffusion/diffusion/unet_attention_patcher.py
+++ b/invokeai/backend/stable_diffusion/diffusion/unet_attention_patcher.py
@@ -33,25 +33,24 @@ class UNetAttentionPatcher:
                 # "attn1" processors do not use IP-Adapters.
                 attn_procs[name] = CustomAttnProcessor2_0()
             else:
-                total_ip_adapter_attention_weights: list[IPAdapterAttentionWeights] = []
+                # Collect the weights from each IP Adapter for the idx'th attention processor.
+                ip_adapter_attention_weights_collection: list[IPAdapterAttentionWeights] = []
 
                 for ip_adapter in self._ip_adapters:
                     ip_adapter_attention_weights: IPAdapterAttentionWeights = IPAdapterAttentionWeights(
                         ip_adapter_weights=None, skip=False
                     )
-                    ip_adapter_weight = ip_adapter["ip_adapter"].attn_weights.get_attention_processor_weights(idx)
+                    ip_adapter_weights = ip_adapter["ip_adapter"].attn_weights.get_attention_processor_weights(idx)
                     skip = True
                     for block in ip_adapter["target_blocks"]:
                         if block in name:
                             skip = False
                             break
-                    ip_adapter_attention_weights.ip_adapter_weights = ip_adapter_weight
+                    ip_adapter_attention_weights.ip_adapter_weights = ip_adapter_weights
                     ip_adapter_attention_weights.skip = skip
-                    total_ip_adapter_attention_weights.append(ip_adapter_attention_weights)
+                    ip_adapter_attention_weights_collection.append(ip_adapter_attention_weights)
 
-            # Collect the weights from each IP Adapter for the idx'th attention processor.
-
-            attn_procs[name] = CustomAttnProcessor2_0(total_ip_adapter_attention_weights)
+            attn_procs[name] = CustomAttnProcessor2_0(ip_adapter_attention_weights_collection)
 
         return attn_procs
 