Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
Make float16 inference work with FLUX on 24GB GPU.
Commit 3cf0365a35 (parent 5870742bb9)
@@ -147,6 +147,9 @@ class FluxTextToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
                 transformer=transformer,
             )

+            t5_embeddings = t5_embeddings.to(dtype=transformer.dtype)
+            clip_embeddings = clip_embeddings.to(dtype=transformer.dtype)
+
             latents = flux_pipeline_with_transformer(
                 height=self.height,
                 width=self.width,
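The change boils down to casting the T5 and CLIP text-encoder outputs to the dtype of the FLUX transformer before sampling, so a transformer loaded in float16 (to fit on a 24GB GPU) never receives float32 embeddings. Below is a minimal, self-contained sketch of the same technique; the helper name match_embedding_dtype and the toy stand-in transformer are assumptions of this example, not part of the InvokeAI code.

import torch


def match_embedding_dtype(
    t5_embeddings: torch.Tensor,
    clip_embeddings: torch.Tensor,
    transformer: torch.nn.Module,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Cast text-encoder outputs to the transformer's parameter dtype.

    The text encoders may emit float32 tensors while the transformer runs
    in float16; mixing dtypes fails inside the transformer's layers, so
    the embeddings are cast to the transformer's dtype before sampling.
    """
    target_dtype = next(transformer.parameters()).dtype
    return (
        t5_embeddings.to(dtype=target_dtype),
        clip_embeddings.to(dtype=target_dtype),
    )


if __name__ == "__main__":
    # Toy stand-in for the FLUX transformer, loaded in float16.
    transformer = torch.nn.Linear(8, 8).to(torch.float16)
    t5_embeddings = torch.randn(1, 512, 8)   # float32 by default
    clip_embeddings = torch.randn(1, 8)
    t5_embeddings, clip_embeddings = match_embedding_dtype(
        t5_embeddings, clip_embeddings, transformer
    )
    assert t5_embeddings.dtype == clip_embeddings.dtype == torch.float16

The diff itself writes transformer.dtype because the model object in InvokeAI exposes a dtype attribute directly; the parameter-based lookup above is just a dependency-free way to get the same value from a plain torch.nn.Module.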