From e8fb8f4d12b30ea864a7ee2044476253e2693364 Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Thu, 8 Aug 2024 18:12:04 -0400
Subject: [PATCH] Make float16 inference work with FLUX on 24GB GPU.

---
 invokeai/app/invocations/flux_text_to_image.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/invokeai/app/invocations/flux_text_to_image.py b/invokeai/app/invocations/flux_text_to_image.py
index b059ab23da..a680908461 100644
--- a/invokeai/app/invocations/flux_text_to_image.py
+++ b/invokeai/app/invocations/flux_text_to_image.py
@@ -147,6 +147,9 @@ class FluxTextToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
             transformer=transformer,
         )
 
+        t5_embeddings = t5_embeddings.to(dtype=transformer.dtype)
+        clip_embeddings = clip_embeddings.to(dtype=transformer.dtype)
+
         latents = flux_pipeline_with_transformer(
             height=self.height,
             width=self.width,
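
Note (not part of the patch): the added lines apply a common dtype-matching pattern, since the T5/CLIP text encoders may produce float32 embeddings while the FLUX transformer is loaded in float16, and mixing dtypes in the denoising call fails. Below is a minimal, self-contained sketch of that pattern; `match_conditioning_dtype` is a hypothetical helper written for illustration, not an InvokeAI API.

```python
import torch


def match_conditioning_dtype(
    t5_embeddings: torch.Tensor,
    clip_embeddings: torch.Tensor,
    transformer: torch.nn.Module,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Cast text-encoder outputs to the transformer's parameter dtype.

    Sketch of the pattern used in the patch: the patch reads
    `transformer.dtype` (exposed by diffusers-style models); here the
    dtype is derived from the module's parameters so the helper works
    for any nn.Module.
    """
    target_dtype = next(transformer.parameters()).dtype
    return (
        t5_embeddings.to(dtype=target_dtype),
        clip_embeddings.to(dtype=target_dtype),
    )


if __name__ == "__main__":
    # Toy stand-ins: a float16 "transformer" and float32 conditioning tensors.
    transformer = torch.nn.Linear(8, 8).to(dtype=torch.float16)
    t5 = torch.randn(1, 512, 8)   # float32 by default
    clip = torch.randn(1, 8)      # float32 by default
    t5, clip = match_conditioning_dtype(t5, clip, transformer)
    print(t5.dtype, clip.dtype)   # torch.float16 torch.float16
```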