Update l2i invoke and seamless to support AutoencoderTiny, remove attention processors if no mid_block is detected

Brandon Rising
2024-03-12 12:00:24 -04:00
parent 43948e0758
commit 7b393656de
2 changed files with 5 additions and 3 deletions


@@ -837,14 +837,15 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
         latents = context.tensors.load(self.latents.latents_name)
         vae_info = context.models.load(self.vae.vae)
-        assert isinstance(vae_info.model, (UNet2DConditionModel, AutoencoderKL))
+        assert isinstance(vae_info.model, (UNet2DConditionModel, AutoencoderKL, AutoencoderTiny))
         with set_seamless(vae_info.model, self.vae.seamless_axes), vae_info as vae:
             assert isinstance(vae, torch.nn.Module)
             latents = latents.to(vae.device)
             if self.fp32:
                 vae.to(dtype=torch.float32)
-                use_torch_2_0_or_xformers = isinstance(
+                # AutoencoderTiny doesn't contain a mid_block property or appear to accept attn processors
+                use_torch_2_0_or_xformers = hasattr(vae.decoder, "mid_block") and isinstance(
                     vae.decoder.mid_block.attentions[0].processor,
                     (
                         AttnProcessor2_0,
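
For context (illustrative only, not part of this commit), a minimal sketch of why the hasattr guard matters, assuming a recent diffusers release where both autoencoders can be instantiated with their default configs: AutoencoderTiny's decoder is a plain convolutional stack with no mid_block attribute, so the guarded expression short-circuits instead of raising AttributeError on the attribute access.

# Illustrative sketch, not part of the commit: AutoencoderTiny's decoder has no
# mid_block, so hasattr() short-circuits before the attribute access that the
# guard in this commit protects against.
from diffusers import AutoencoderKL, AutoencoderTiny

for vae in (AutoencoderKL(), AutoencoderTiny()):
    has_mid_block = hasattr(vae.decoder, "mid_block")
    print(type(vae).__name__, "has mid_block:", has_mid_block)
    if has_mid_block:
        # This access is only safe behind the guard; it is the expression
        # that would fail for AutoencoderTiny without the hasattr check.
        print("  processor:", type(vae.decoder.mid_block.attentions[0].processor).__name__)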