diff --git a/invokeai/app/invocations/flux_text_to_image.py b/invokeai/app/invocations/flux_text_to_image.py
index 334e8fd1ea..ed744f441f 100644
--- a/invokeai/app/invocations/flux_text_to_image.py
+++ b/invokeai/app/invocations/flux_text_to_image.py
@@ -101,10 +101,7 @@ class FluxTextToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
         # if the cache is not empty.
         # context.models._services.model_manager.load.ram_cache.make_room(24 * 2**30)
 
-        with (
-            transformer_info as transformer,
-            scheduler_info as scheduler
-        ):
+        with transformer_info as transformer, scheduler_info as scheduler:
             assert isinstance(transformer, FluxTransformer2DModel)
             assert isinstance(scheduler, FlowMatchEulerDiscreteScheduler)
 
diff --git a/invokeai/app/invocations/model.py b/invokeai/app/invocations/model.py
index 4672f6a83d..c3902c1cb1 100644
--- a/invokeai/app/invocations/model.py
+++ b/invokeai/app/invocations/model.py
@@ -60,11 +60,11 @@ class CLIPField(BaseModel):
     loras: List[LoRAField] = Field(description="LoRAs to apply on model loading")
 
 
-
 class TransformerField(BaseModel):
     transformer: ModelIdentifierField = Field(description="Info to load Transformer submodel")
     scheduler: ModelIdentifierField = Field(description="Info to load scheduler submodel")
 
+
 class T5EncoderField(BaseModel):
     tokenizer: ModelIdentifierField = Field(description="Info to load tokenizer submodel")
     text_encoder: ModelIdentifierField = Field(description="Info to load text_encoder submodel")
diff --git a/invokeai/backend/model_manager/load/model_util.py b/invokeai/backend/model_manager/load/model_util.py
index b3b78104d9..9794b8098e 100644
--- a/invokeai/backend/model_manager/load/model_util.py
+++ b/invokeai/backend/model_manager/load/model_util.py
@@ -52,9 +52,7 @@ def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int:
         return model.calc_size()
     elif isinstance(
         model,
-        (
-            T5TokenizerFast,
-        ),
+        (T5TokenizerFast,),
     ):
         return len(model)
     else:
diff --git a/invokeai/backend/model_manager/util/select_hf_files.py b/invokeai/backend/model_manager/util/select_hf_files.py
index 2e86d9a62e..60abc3384c 100644
--- a/invokeai/backend/model_manager/util/select_hf_files.py
+++ b/invokeai/backend/model_manager/util/select_hf_files.py
@@ -54,7 +54,7 @@ def filter_files(
                 "lora_weights.safetensors",
                 "weights.pb",
                 "onnx_data",
-                "spiece.model", # Added for `black-forest-labs/FLUX.1-schnell`.
+                "spiece.model",  # Added for `black-forest-labs/FLUX.1-schnell`.
             )
         ):
             paths.append(file)
diff --git a/invokeai/backend/quantization/fast_quantized_diffusion_model.py b/invokeai/backend/quantization/fast_quantized_diffusion_model.py
index 395efc99c4..b1531094d1 100644
--- a/invokeai/backend/quantization/fast_quantized_diffusion_model.py
+++ b/invokeai/backend/quantization/fast_quantized_diffusion_model.py
@@ -19,7 +19,7 @@ from invokeai.backend.requantize import requantize
 
 class FastQuantizedDiffusersModel(QuantizedDiffusersModel):
     @classmethod
-    def from_pretrained(cls, model_name_or_path: Union[str, os.PathLike], base_class = FluxTransformer2DModel, **kwargs):
+    def from_pretrained(cls, model_name_or_path: Union[str, os.PathLike], base_class=FluxTransformer2DModel, **kwargs):
         """We override the `from_pretrained()` method in order to use our custom `requantize()` implementation."""
         base_class = base_class or cls.base_class
         if base_class is None:
diff --git a/invokeai/backend/quantization/fast_quantized_transformers_model.py b/invokeai/backend/quantization/fast_quantized_transformers_model.py
index 99f889b4af..5f16bae611 100644
--- a/invokeai/backend/quantization/fast_quantized_transformers_model.py
+++ b/invokeai/backend/quantization/fast_quantized_transformers_model.py
@@ -15,7 +15,9 @@ from invokeai.backend.requantize import requantize
 
 class FastQuantizedTransformersModel(QuantizedTransformersModel):
     @classmethod
-    def from_pretrained(cls, model_name_or_path: Union[str, os.PathLike], auto_class = AutoModelForTextEncoding, **kwargs):
+    def from_pretrained(
+        cls, model_name_or_path: Union[str, os.PathLike], auto_class=AutoModelForTextEncoding, **kwargs
+    ):
         """We override the `from_pretrained()` method in order to use our custom `requantize()` implementation."""
         auto_class = auto_class or cls.auto_class
         if auto_class is None: