From 5e8cf9fb6a7e755e4257677cabb2c1cdcedb19cf Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Wed, 28 Aug 2024 14:33:01 +0000
Subject: [PATCH] Remove hack to clear cache from the FluxTextToImageInvocation.

We now clear the cache based on the on-disk model size.
---
 invokeai/app/invocations/flux_text_to_image.py | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/invokeai/app/invocations/flux_text_to_image.py b/invokeai/app/invocations/flux_text_to_image.py
index b6ff06c67b..3ac45ba19c 100644
--- a/invokeai/app/invocations/flux_text_to_image.py
+++ b/invokeai/app/invocations/flux_text_to_image.py
@@ -101,11 +101,6 @@ class FluxTextToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
         bs, t5_seq_len, _ = t5_embeddings.shape
         txt_ids = torch.zeros(bs, t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device())

-        # HACK(ryand): Manually empty the cache. Currently we don't check the size of the model before loading it from
-        # disk. Since the transformer model is large (24GB), there's a good chance that it will OOM on 32GB RAM systems
-        # if the cache is not empty.
-        context.models._services.model_manager.load.ram_cache.make_room(24 * 2**30)
-
         with transformer_info as transformer:
             assert isinstance(transformer, Flux)
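
A minimal sketch of the replacement behavior the commit message describes: measure the model's size on disk and ask the RAM cache to make room for that many bytes before loading, instead of hard-coding 24 GB. Only `make_room(size_bytes)` comes from the removed hack itself; the helper name `make_room_for_model`, the `ram_cache` parameter, and the single-file size check are illustrative assumptions, not InvokeAI's actual implementation.

import os


def make_room_for_model(ram_cache, model_path: str) -> None:
    """Make room in the RAM cache for the model stored at model_path.

    Sketch only: a real checkpoint may span multiple files, in which case
    the size check would need to sum over every file belonging to the model.
    """
    # How big is the model on disk? (Assumes a single-file model.)
    size_bytes = os.path.getsize(model_path)

    # make_room() is the call used by the removed hack; here it is driven
    # by the measured size rather than a fixed 24 * 2**30 bytes.
    ram_cache.make_room(size_bytes)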