Revert "L2I Performance updates"

This reverts commit 8f0352f3ad.
This commit is contained in:
Brandon Rising
2024-02-07 14:46:53 -05:00
parent 8f0352f3ad
commit db5f1c8623
2 changed files with 33 additions and 34 deletions

View File

@ -860,9 +860,9 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata):
vae.disable_tiling()
# clear memory as vae decode can request a lot
# torch.cuda.empty_cache()
# if choose_torch_device() == torch.device("mps"):
# mps.empty_cache()
torch.cuda.empty_cache()
if choose_torch_device() == torch.device("mps"):
mps.empty_cache()
with torch.inference_mode():
# copied from diffusers pipeline
@ -874,9 +874,9 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata):
image = VaeImageProcessor.numpy_to_pil(np_image)[0]
# torch.cuda.empty_cache()
# if choose_torch_device() == torch.device("mps"):
# mps.empty_cache()
torch.cuda.empty_cache()
if choose_torch_device() == torch.device("mps"):
mps.empty_cache()
image_dto = context.services.images.create(
image=image,

View File

@ -7,33 +7,23 @@ import torch.nn as nn
from diffusers.models import AutoencoderKL, UNet2DConditionModel
def _conv_forward_asymmetric(self, input, weight, bias=None):
def _conv_forward_asymmetric(self, input, weight, bias):
"""
Optimized patch for Conv2d._conv_forward that supports asymmetric padding.
Combines padding for both axes into a single operation.
Patch for Conv2d._conv_forward that supports asymmetric padding
"""
# Calculate the combined padding for both x and y axes
combined_padding = (
self.asymmetric_padding["x"][0], self.asymmetric_padding["x"][1],
self.asymmetric_padding["y"][2], self.asymmetric_padding["y"][3]
)
# Apply combined padding in a single operation
working = nn.functional.pad(input, combined_padding, mode=self.asymmetric_padding_mode["x"])
# Perform the convolution with no additional padding (since it's already applied)
working = nn.functional.pad(input, self.asymmetric_padding["x"], mode=self.asymmetric_padding_mode["x"])
working = nn.functional.pad(working, self.asymmetric_padding["y"], mode=self.asymmetric_padding_mode["y"])
return nn.functional.conv2d(
working,
weight,
bias,
self.stride,
(0, 0), # No additional padding needed as we've already padded
nn.modules.utils._pair(0),
self.dilation,
self.groups
self.groups,
)
@contextmanager
def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL], seamless_axes: List[str]):
try:
@ -81,16 +71,23 @@ def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL], seamless_axes: List[str]):
"""
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
# Directly set padding mode and values without redundant checks
m.asymmetric_padding_mode = {
"x": "circular" if "x" in seamless_axes else "constant",
"y": "circular" if "y" in seamless_axes else "constant"
}
m.asymmetric_padding = {
"x": (m.padding[0], m.padding[1], 0, 0),
"y": (0, 0, m.padding[2], m.padding[3])
}
# Backup and override the conv forward method
m.asymmetric_padding_mode = {}
m.asymmetric_padding = {}
m.asymmetric_padding_mode["x"] = "circular" if ("x" in seamless_axes) else "constant"
m.asymmetric_padding["x"] = (
m._reversed_padding_repeated_twice[0],
m._reversed_padding_repeated_twice[1],
0,
0,
)
m.asymmetric_padding_mode["y"] = "circular" if ("y" in seamless_axes) else "constant"
m.asymmetric_padding["y"] = (
0,
0,
m._reversed_padding_repeated_twice[2],
m._reversed_padding_repeated_twice[3],
)
to_restore.append((m, m._conv_forward))
m._conv_forward = _conv_forward_asymmetric.__get__(m, nn.Conv2d)
@ -99,5 +96,7 @@ def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL], seamless_axes: List[str]):
finally:
for module, orig_conv_forward in to_restore:
module._conv_forward = orig_conv_forward
del module.asymmetric_padding_mode
del module.asymmetric_padding
if hasattr(module, "asymmetric_padding_mode"):
del module.asymmetric_padding_mode
if hasattr(module, "asymmetric_padding"):
del module.asymmetric_padding