From bb5648983fa67f05c8f474868d646528038491a6 Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Fri, 7 Jun 2024 11:45:42 -0400
Subject: [PATCH] Minor improvements to LatentsToImageInvocation type hints.

---
 invokeai/app/invocations/latents_to_image.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/invokeai/app/invocations/latents_to_image.py b/invokeai/app/invocations/latents_to_image.py
index 07658f452f..8134a9bfeb 100644
--- a/invokeai/app/invocations/latents_to_image.py
+++ b/invokeai/app/invocations/latents_to_image.py
@@ -8,7 +8,6 @@ from diffusers.models.attention_processor import (
 )
 from diffusers.models.autoencoders.autoencoder_kl import AutoencoderKL
 from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
-from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
 from PIL import Image
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
@@ -59,9 +58,9 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
         use_fp32: bool,
         use_tiling: bool,
     ) -> Image.Image:
-        assert isinstance(vae_info.model, (UNet2DConditionModel, AutoencoderKL, AutoencoderTiny))
+        assert isinstance(vae_info.model, (AutoencoderKL, AutoencoderTiny))
         with set_seamless(vae_info.model, seamless_axes), vae_info as vae:
-            assert isinstance(vae, torch.nn.Module)
+            assert isinstance(vae, (AutoencoderKL, AutoencoderTiny))
             latents = latents.to(vae.device)
             if use_fp32:
                 vae.to(dtype=torch.float32)