From 2bf747caf6a5ea5481984e2545c73af955c1b54c Mon Sep 17 00:00:00 2001
From: Sergey Borisov
Date: Mon, 28 Aug 2023 18:36:27 +0300
Subject: [PATCH] Blackify

---
 invokeai/backend/util/hotfixes.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/invokeai/backend/util/hotfixes.py b/invokeai/backend/util/hotfixes.py
index cf97d494d7..161a35eb52 100644
--- a/invokeai/backend/util/hotfixes.py
+++ b/invokeai/backend/util/hotfixes.py
@@ -764,6 +764,7 @@ diffusers.models.controlnet.ControlNetModel = ControlNetModel
 
 try:
     import xformers
+
     xformers_available = True
 except:
     xformers_available = False
@@ -772,27 +773,28 @@ except:
 if xformers_available:
     # TODO: remove when fixed in diffusers
     _xformers_memory_efficient_attention = xformers.ops.memory_efficient_attention
+
     def new_memory_efficient_attention(
         query: torch.Tensor,
         key: torch.Tensor,
         value: torch.Tensor,
-        attn_bias = None,
+        attn_bias=None,
         p: float = 0.0,
         scale: Optional[float] = None,
         *,
-        op = None,
+        op=None,
     ):
         # diffusers not align shape to 8, which is required by xformers
         if attn_bias is not None and type(attn_bias) is torch.Tensor:
             orig_size = attn_bias.shape[-1]
             new_size = ((orig_size + 7) // 8) * 8
-            aligned_attn_bias = torch.zeros(
+            aligned_attn_bias = torch.zeros(
                 (attn_bias.shape[0], attn_bias.shape[1], new_size),
                 device=attn_bias.device,
                 dtype=attn_bias.dtype,
            )
-            aligned_attn_bias[:,:,:orig_size] = attn_bias
-            attn_bias = aligned_attn_bias[:,:,:orig_size]
+            aligned_attn_bias[:, :, :orig_size] = attn_bias
+            attn_bias = aligned_attn_bias[:, :, :orig_size]
             return _xformers_memory_efficient_attention(
                 query=query,
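
Context for the second hunk: the code being reformatted works around a shape-alignment
requirement in xformers. Per the patched file's own comment, xformers' memory_efficient_attention
requires the attention-bias tensor's last dimension to be aligned to a multiple of 8, while
diffusers passes an unaligned bias. The standalone sketch below (illustrative only, not part of
the patch; the example tensor shape is made up) reproduces the trick in isolation: allocate a
buffer padded up to the next multiple of 8, copy the bias in, then slice back to the original
size so the logical shape is unchanged while the underlying storage stays aligned.

    import torch

    attn_bias = torch.randn(2, 4, 77)       # last dim 77 is not a multiple of 8

    orig_size = attn_bias.shape[-1]          # 77
    new_size = ((orig_size + 7) // 8) * 8    # 80: round up to the next multiple of 8

    aligned_attn_bias = torch.zeros(
        (attn_bias.shape[0], attn_bias.shape[1], new_size),
        device=attn_bias.device,
        dtype=attn_bias.dtype,
    )
    aligned_attn_bias[:, :, :orig_size] = attn_bias  # copy the real bias into the padded buffer
    attn_bias = aligned_attn_bias[:, :, :orig_size]  # view with the original shape, aligned storage

    assert attn_bias.shape[-1] == orig_size          # logical size unchanged
    assert aligned_attn_bias.stride(1) % 8 == 0      # last-dim storage is 8-aligned (stride 80)

Slicing back to orig_size is deliberate: the kernel cares about the alignment of the underlying
storage, so the view keeps the shape the caller expects while satisfying the constraint.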