From 7b393656de84b1caec055ae6ec9fefa5a48f03c8 Mon Sep 17 00:00:00 2001
From: Brandon Rising
Date: Tue, 12 Mar 2024 12:00:24 -0400
Subject: [PATCH] Update l2i invoke and seamless to support AutoencoderTiny,
 remove attention processors if no mid_block is detected

---
 invokeai/app/invocations/latent.py            | 5 +++--
 invokeai/backend/stable_diffusion/seamless.py | 3 ++-
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/invokeai/app/invocations/latent.py b/invokeai/app/invocations/latent.py
index fbc34bc583..0e0b57ea7f 100644
--- a/invokeai/app/invocations/latent.py
+++ b/invokeai/app/invocations/latent.py
@@ -837,14 +837,15 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
         latents = context.tensors.load(self.latents.latents_name)
 
         vae_info = context.models.load(self.vae.vae)
-        assert isinstance(vae_info.model, (UNet2DConditionModel, AutoencoderKL))
+        assert isinstance(vae_info.model, (UNet2DConditionModel, AutoencoderKL, AutoencoderTiny))
         with set_seamless(vae_info.model, self.vae.seamless_axes), vae_info as vae:
             assert isinstance(vae, torch.nn.Module)
             latents = latents.to(vae.device)
             if self.fp32:
                 vae.to(dtype=torch.float32)
 
-                use_torch_2_0_or_xformers = isinstance(
+                # AutoencoderTiny doesn't contain a mid_block property or appear to accept attn processors
+                use_torch_2_0_or_xformers = hasattr(vae.decoder, "mid_block") and isinstance(
                     vae.decoder.mid_block.attentions[0].processor,
                     (
                         AttnProcessor2_0,
diff --git a/invokeai/backend/stable_diffusion/seamless.py b/invokeai/backend/stable_diffusion/seamless.py
index fb9112b56d..e5af27bf4c 100644
--- a/invokeai/backend/stable_diffusion/seamless.py
+++ b/invokeai/backend/stable_diffusion/seamless.py
@@ -5,6 +5,7 @@ from typing import Callable, List, Union
 
 import torch.nn as nn
 from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
+from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
 from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
 
 
@@ -26,7 +27,7 @@ def _conv_forward_asymmetric(self, input, weight, bias):
 
 
 @contextmanager
-def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL], seamless_axes: List[str]):
+def set_seamless(model: Union[UNet2DConditionModel, AutoencoderKL, AutoencoderTiny], seamless_axes: List[str]):
     # Callable: (input: Tensor, weight: Tensor, bias: Optional[Tensor]) -> Tensor
     to_restore: list[tuple[nn.Conv2d | nn.ConvTranspose2d, Callable]] = []
     try:
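
Not part of the patch: a minimal standalone sketch of the behaviour the hasattr() guard relies on. It assumes a recent diffusers release with AutoencoderTiny support and uses the public "stabilityai/sd-vae-ft-mse" (AutoencoderKL) and "madebyollin/taesd" (AutoencoderTiny) checkpoints purely as examples; the point is that AutoencoderTiny's decoder exposes no mid_block, so the attention-processor check must be skipped for it.

# Sketch only: AutoencoderKL's decoder carries a mid_block with attention
# layers whose processor can be inspected, while AutoencoderTiny's decoder
# is a plain conv stack with no mid_block at all.
from diffusers import AutoencoderKL, AutoencoderTiny

kl_vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")  # example checkpoint
tiny_vae = AutoencoderTiny.from_pretrained("madebyollin/taesd")      # example checkpoint

for vae in (kl_vae, tiny_vae):
    if hasattr(vae.decoder, "mid_block"):
        # For AutoencoderKL this prints the processor type, e.g. AttnProcessor2_0.
        print(type(vae).__name__, type(vae.decoder.mid_block.attentions[0].processor).__name__)
    else:
        # For AutoencoderTiny there are no attention processors to upcast.
        print(type(vae).__name__, "has no mid_block; skipping attention processor check")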