From 50085b40bb8a6ab0485e1e75a8064de05724f964 Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Mon, 26 Aug 2024 23:39:00 +0000
Subject: [PATCH] Update starter model size estimates.

---
 invokeai/backend/model_manager/starter_models.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/invokeai/backend/model_manager/starter_models.py b/invokeai/backend/model_manager/starter_models.py
index d08fc9fc97..76b91f0d34 100644
--- a/invokeai/backend/model_manager/starter_models.py
+++ b/invokeai/backend/model_manager/starter_models.py
@@ -94,7 +94,7 @@ STARTER_MODELS: list[StarterModel] = [
         name="FLUX Schnell (Quantized)",
         base=BaseModelType.Flux,
         source="InvokeAI/flux_schnell::transformer/bnb_nf4/flux1-schnell-bnb_nf4.safetensors",
-        description="FLUX schnell transformer quantized to bitsandbytes NF4 format. Total size with dependencies: ~14GB",
+        description="FLUX schnell transformer quantized to bitsandbytes NF4 format. Total size with dependencies: ~12GB",
         type=ModelType.Main,
         dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
     ),
@@ -102,7 +102,7 @@ STARTER_MODELS: list[StarterModel] = [
         name="FLUX Dev (Quantized)",
         base=BaseModelType.Flux,
         source="InvokeAI/flux_dev::transformer/bnb_nf4/flux1-dev-bnb_nf4.safetensors",
-        description="FLUX dev transformer quantized to bitsandbytes NF4 format. Total size with dependencies: ~16GB",
+        description="FLUX dev transformer quantized to bitsandbytes NF4 format. Total size with dependencies: ~12GB",
         type=ModelType.Main,
         dependencies=[t5_8b_quantized_encoder, flux_vae, clip_l_encoder],
     ),
@@ -118,7 +118,7 @@ STARTER_MODELS: list[StarterModel] = [
         name="FLUX Dev",
         base=BaseModelType.Flux,
         source="InvokeAI/flux_dev::transformer/base/flux1-dev.safetensors",
-        description="FLUX dev transformer in bfloat16. Total size with dependencies: ~34GB",
+        description="FLUX dev transformer in bfloat16. Total size with dependencies: ~33GB",
         type=ModelType.Main,
         dependencies=[t5_base_encoder, flux_vae, clip_l_encoder],
     ),