mirror of
https://github.com/invoke-ai/InvokeAI
synced 2024-08-30 20:32:17 +00:00
~7% speedup (1.57 to 1.69it/s) from switch to += in ldm.modules.attention. (#482)
Tested on 8GB eGPU nvidia setup so YMMV. 512x512 output, max VRAM stays same.
This commit is contained in:
parent
2cf8de9234
commit
529fc57f2b
@@ -235,9 +235,9 @@ class BasicTransformerBlock(nn.Module):

     def _forward(self, x, context=None):
         x = x.contiguous() if x.device.type == 'mps' else x
-        x = self.attn1(self.norm1(x)) + x
-        x = self.attn2(self.norm2(x), context=context) + x
-        x = self.ff(self.norm3(x)) + x
+        x += self.attn1(self.norm1(x))
+        x += self.attn2(self.norm2(x), context=context)
+        x += self.ff(self.norm3(x))
         return x
Loading…
Reference in New Issue
Block a user