From 3f79467f7bf153f142ea568b54bc0e0694375806 Mon Sep 17 00:00:00 2001
From: Sergey Borisov
Date: Wed, 17 Jul 2024 04:24:45 +0300
Subject: [PATCH] Ruff format

---
 .../stable_diffusion/diffusion/conditioning_data.py | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py b/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py
index b017454a78..5fe1483ebc 100644
--- a/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py
+++ b/invokeai/backend/stable_diffusion/diffusion/conditioning_data.py
@@ -3,7 +3,7 @@ from __future__ import annotations
 import math
 from dataclasses import dataclass
 from enum import Enum
-from typing import TYPE_CHECKING, List, Optional, Union
+from typing import TYPE_CHECKING, List, Optional, Tuple, Union
 
 import torch
 
@@ -231,7 +231,7 @@ class TextConditioningData:
         conditionings: List[torch.Tensor],
     ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
         """Concatenate provided conditioning tensors to one batched tensor.
-        If tensors have different sizes then pad them by zeros and creates
+        If tensors have different sizes then pad them by zeros and creates
         encoder_attention_mask to exclude padding from attention.
 
         Args:
@@ -242,9 +242,7 @@ class TextConditioningData:
         if any(c.shape[1] != max_len for c in conditionings):
             encoder_attention_masks = [None] * len(conditionings)
             for i in range(len(conditionings)):
-                conditionings[i], encoder_attention_masks[i] = cls._pad_conditioning(
-                    conditionings[i], max_len
-                )
+                conditionings[i], encoder_attention_masks[i] = cls._pad_conditioning(conditionings[i], max_len)
             encoder_attention_mask = torch.cat(encoder_attention_masks)
         return torch.cat(conditionings), encoder_attention_mask
 
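
For context on the method being reformatted: it concatenates per-prompt conditioning tensors into one batch and, when their sequence lengths differ, pads the shorter ones with zeros and builds an encoder attention mask so the padded positions are ignored by cross-attention. The patch does not include the body of cls._pad_conditioning, so the following is a minimal, self-contained sketch of that pad-and-mask pattern. It assumes (batch, seq_len, embed_dim) tensor shapes; the standalone function names, the mask dtype, and the __main__ shapes are illustrative assumptions, not InvokeAI's actual implementation.

    from typing import List, Optional, Tuple

    import torch


    def pad_conditioning(conditioning: torch.Tensor, target_len: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Zero-pad along the sequence dim and build a matching attention mask.

        Assumed shape: (batch, seq_len, embed_dim). The returned mask is
        (batch, target_len) with 1.0 for real tokens and 0.0 for padding;
        the real implementation's mask dtype/convention may differ.
        """
        batch, seq_len, _ = conditioning.shape
        pad_len = target_len - seq_len
        # F.pad pads the last dims first: (embed_left, embed_right, seq_left, seq_right).
        padded = torch.nn.functional.pad(conditioning, (0, 0, 0, pad_len), value=0.0)
        mask = torch.ones(batch, target_len, device=conditioning.device, dtype=conditioning.dtype)
        mask[:, seq_len:] = 0.0
        return padded, mask


    def concat_conditionings_for_batch(
        conditionings: List[torch.Tensor],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Concatenate conditioning tensors into one batch, padding shorter ones with zeros."""
        encoder_attention_mask = None
        max_len = max(c.shape[1] for c in conditionings)
        if any(c.shape[1] != max_len for c in conditionings):
            padded, masks = [], []
            for c in conditionings:
                p, m = pad_conditioning(c, max_len)
                padded.append(p)
                masks.append(m)
            conditionings = padded
            encoder_attention_mask = torch.cat(masks)
        return torch.cat(conditionings), encoder_attention_mask


    if __name__ == "__main__":
        # Two prompts of different token lengths (e.g. 77 vs. 154 tokens after chunking).
        a = torch.randn(1, 77, 768)
        b = torch.randn(1, 154, 768)
        batched, mask = concat_conditionings_for_batch([a, b])
        print(batched.shape)  # torch.Size([2, 154, 768])
        print(mask.shape)     # torch.Size([2, 154])

Zero-padding alone would still let cross-attention attend to the padded positions; returning the mask alongside the padded tensors is what lets the caller exclude that padding from attention, which is why the method returns both values.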