Mirror of https://github.com/invoke-ai/InvokeAI (synced 2024-08-30 20:32:17 +00:00)
Remove hack to clear cache from the FluxTextToImageInvocation. We now clear the cache based on the on-disk model size.
Parent: c738fe051f
Commit: 5e8cf9fb6a
@@ -101,11 +101,6 @@ class FluxTextToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
         bs, t5_seq_len, _ = t5_embeddings.shape
         txt_ids = torch.zeros(bs, t5_seq_len, 3, dtype=inference_dtype, device=TorchDevice.choose_torch_device())
 
-        # HACK(ryand): Manually empty the cache. Currently we don't check the size of the model before loading it from
-        # disk. Since the transformer model is large (24GB), there's a good chance that it will OOM on 32GB RAM systems
-        # if the cache is not empty.
-        context.models._services.model_manager.load.ram_cache.make_room(24 * 2**30)
-
         with transformer_info as transformer:
             assert isinstance(transformer, Flux)
 
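The removed hunk hard-coded `24 * 2**30` bytes, i.e. 24 GiB, as the amount of RAM-cache space to free before loading the FLUX transformer. Per the commit message, the cache is now sized from the model's on-disk footprint instead. A minimal sketch of that idea, assuming a hypothetical `model_path` and reusing the `ram_cache.make_room()` call visible in the diff; InvokeAI's actual implementation lives in its model manager and may differ:

import os

def size_on_disk(model_path: str) -> int:
    # A single-file checkpoint is just its own size; a diffusers-style
    # model directory is the sum of every file under it.
    if os.path.isfile(model_path):
        return os.path.getsize(model_path)
    total = 0
    for root, _dirs, files in os.walk(model_path):
        for name in files:
            total += os.path.getsize(os.path.join(root, name))
    return total

# Hypothetical call site, mirroring the removed hack but sized from the
# model on disk rather than hard-coded to 24 GiB:
# ram_cache.make_room(size_on_disk(model_path))

Sizing the eviction request from disk means a small model no longer evicts the whole cache, and a model larger than 24 GiB still gets enough room.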