diff --git a/invokeai/backend/quantization/load_flux_model_bnb_llm_int8.py b/invokeai/backend/quantization/scripts/load_flux_model_bnb_llm_int8.py
similarity index 97%
rename from invokeai/backend/quantization/load_flux_model_bnb_llm_int8.py
rename to invokeai/backend/quantization/scripts/load_flux_model_bnb_llm_int8.py
index c01193e6ac..e8771dca22 100644
--- a/invokeai/backend/quantization/load_flux_model_bnb_llm_int8.py
+++ b/invokeai/backend/quantization/scripts/load_flux_model_bnb_llm_int8.py
@@ -6,7 +6,7 @@ from flux.util import configs as flux_configs
 from safetensors.torch import load_file, save_file
 
 from invokeai.backend.quantization.bnb_llm_int8 import quantize_model_llm_int8
-from invokeai.backend.quantization.load_flux_model_bnb_nf4 import log_time
+from invokeai.backend.quantization.scripts.load_flux_model_bnb_nf4 import log_time
 
 
 def main():
diff --git a/invokeai/backend/quantization/load_flux_model_bnb_nf4.py b/invokeai/backend/quantization/scripts/load_flux_model_bnb_nf4.py
similarity index 100%
rename from invokeai/backend/quantization/load_flux_model_bnb_nf4.py
rename to invokeai/backend/quantization/scripts/load_flux_model_bnb_nf4.py
diff --git a/invokeai/backend/quantization/quantize_t5_xxl_bnb_llm_int8.py b/invokeai/backend/quantization/scripts/quantize_t5_xxl_bnb_llm_int8.py
similarity index 98%
rename from invokeai/backend/quantization/quantize_t5_xxl_bnb_llm_int8.py
rename to invokeai/backend/quantization/scripts/quantize_t5_xxl_bnb_llm_int8.py
index d37041248d..fc681e8fc5 100644
--- a/invokeai/backend/quantization/quantize_t5_xxl_bnb_llm_int8.py
+++ b/invokeai/backend/quantization/scripts/quantize_t5_xxl_bnb_llm_int8.py
@@ -5,7 +5,7 @@ from safetensors.torch import load_file, save_file
 from transformers import AutoConfig, AutoModelForTextEncoding, T5EncoderModel
 
 from invokeai.backend.quantization.bnb_llm_int8 import quantize_model_llm_int8
-from invokeai.backend.quantization.load_flux_model_bnb_nf4 import log_time
+from invokeai.backend.quantization.scripts.load_flux_model_bnb_nf4 import log_time
 
 
 def load_state_dict_into_t5(model: T5EncoderModel, state_dict: dict):