Mirror of https://github.com/invoke-ai/InvokeAI
fix(latent): remove temporary workaround for lack of TAESD tiling support.
TAESD tiling support is now available in diffusers 0.21: https://github.com/huggingface/diffusers/pull/4627
parent 090db1ab3a
commit d219167849
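For context, a minimal sketch (not part of this commit) of what the linked diffusers change enables: as of diffusers 0.21, AutoencoderTiny exposes the same tiling toggles as AutoencoderKL, so calling them on a TAESD VAE no longer raises AttributeError. The madebyollin/taesd checkpoint is the standard TAESD release and appears here only for illustration.

from diffusers import AutoencoderTiny

# Requires diffusers >= 0.21, where AutoencoderTiny gained tiling support
# (https://github.com/huggingface/diffusers/pull/4627).
vae = AutoencoderTiny.from_pretrained("madebyollin/taesd")
vae.enable_tiling()   # before 0.21 this raised AttributeError on TAESD
vae.disable_tiling()  # both toggles now exist, matching AutoencoderKL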
@@ -48,7 +48,6 @@ from ...backend.stable_diffusion.diffusers_pipeline import (
 from ...backend.stable_diffusion.diffusion.shared_invokeai_diffusion import PostprocessingSettings
 from ...backend.stable_diffusion.schedulers import SCHEDULER_MAP
 from ...backend.util.devices import choose_precision, choose_torch_device
-from ...backend.util.logging import InvokeAILogger
 from ..models.image import ImageCategory, ResourceOrigin
 from .baseinvocation import (
     BaseInvocation,
@@ -608,19 +607,10 @@ class LatentsToImageInvocation(BaseInvocation):
                 vae.to(dtype=torch.float16)
                 latents = latents.half()

-            try:
-                if self.tiled or context.services.configuration.tiled_decode:
-                    vae.enable_tiling()
-                else:
-                    vae.disable_tiling()
-            except AttributeError as err:
-                # FIXME: This is a TEMPORARY measure until AutoencoderTiny gets tiling support from https://github.com/huggingface/diffusers/pull/4627
-                if err.name.endswith("_tiling"):
-                    InvokeAILogger.getLogger(self.__class__.__name__).debug(
-                        "ignoring tiling error for %s", vae.__class__, exc_info=err
-                    )
-                else:
-                    raise
+            if self.tiled or context.services.configuration.tiled_decode:
+                vae.enable_tiling()
+            else:
+                vae.disable_tiling()

             # clear memory as vae decode can request a lot
             torch.cuda.empty_cache()
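The guard deleted above existed because pre-0.21 AutoencoderTiny had no enable_tiling/disable_tiling methods; the except branch keyed off AttributeError.name, a Python 3.10+ field naming the missing attribute. A self-contained sketch of the decode path as it stands after this hunk, assuming diffusers >= 0.21; the checkpoint name and latent shape are illustrative, not taken from the commit:

import torch
from diffusers import AutoencoderTiny

vae = AutoencoderTiny.from_pretrained("madebyollin/taesd")
vae.enable_tiling()  # safe unconditionally on diffusers >= 0.21

# 4-channel latents at 1/8 resolution, i.e. a 1024x1024 output image.
latents = torch.randn(1, 4, 128, 128)
with torch.no_grad():
    image = vae.decode(latents).sample  # decoded in tiles to bound memory

torch.cuda.empty_cache()  # no-op without CUDA; mirrors the invocation's cleanup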
@@ -783,19 +773,10 @@ class ImageToLatentsInvocation(BaseInvocation):
                 vae.to(dtype=torch.float16)
                 # latents = latents.half()

-            try:
-                if tiled:
-                    vae.enable_tiling()
-                else:
-                    vae.disable_tiling()
-            except AttributeError as err:
-                # FIXME: This is a TEMPORARY measure until AutoencoderTiny gets tiling support from https://github.com/huggingface/diffusers/pull/4627
-                if err.name.endswith("_tiling"):
-                    InvokeAILogger.getLogger(ImageToLatentsInvocation.__name__).debug(
-                        "ignoring tiling error for %s", vae.__class__, exc_info=err
-                    )
-                else:
-                    raise
+            if tiled:
+                vae.enable_tiling()
+            else:
+                vae.disable_tiling()

             # non_noised_latents_from_image
             image_tensor = image_tensor.to(device=vae.device, dtype=vae.dtype)
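The encode path in this hunk mirrors the decode path. A sketch under the same assumptions (diffusers >= 0.21, illustrative checkpoint and shapes); note that AutoencoderTiny.encode returns latents directly via .latents, unlike AutoencoderKL's .latent_dist:

import torch
from diffusers import AutoencoderTiny

vae = AutoencoderTiny.from_pretrained("madebyollin/taesd")
vae.enable_tiling()

image_tensor = torch.rand(1, 3, 1024, 1024)  # dummy RGB batch, illustrative only
image_tensor = image_tensor.to(device=vae.device, dtype=vae.dtype)
with torch.no_grad():
    latents = vae.encode(image_tensor).latents  # shape (1, 4, 128, 128)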