Another attempt

Billy
2025-06-20 14:10:06 +10:00
parent 5851c46c81
commit 4ee54eac1d
2 changed files with 5 additions and 3 deletions

@@ -86,14 +86,14 @@ class LoRALoader(ModelLoader):
             state_dict = convert_sdxl_keys_to_diffusers_format(state_dict)
             model = lora_model_from_sd_state_dict(state_dict=state_dict)
         elif self._model_base == BaseModelType.Flux:
-            if config.format == ModelFormat.Diffusers:
+            if config.format in [ModelFormat.Diffusers, ModelFormat.OMI]:
                 # HACK(ryand): We set alpha=None for diffusers PEFT format models. These models are typically
                 # distributed as a single file without the associated metadata containing the alpha value. We chose
                 # alpha=None, because this is treated as alpha=rank internally in `LoRALayerBase.scale()`. alpha=rank
                 # is a popular choice. For example, in the diffusers training scripts:
                 # https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora_flux.py#L1194
                 model = lora_model_from_flux_diffusers_state_dict(state_dict=state_dict, alpha=None)
-            elif config.format in [ModelFormat.LyCORIS, ModelFormat.OMI]:
+            elif config.format == ModelFormat.LyCORIS:
                 if is_state_dict_likely_in_flux_kohya_format(state_dict=state_dict):
                     model = lora_model_from_flux_kohya_state_dict(state_dict=state_dict)
                 elif is_state_dict_likely_in_flux_onetrainer_format(state_dict=state_dict):
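
Note on the HACK comment above: it hinges on how a LoRA layer's scale is computed when the checkpoint carries no alpha. The sketch below is a minimal illustration of that convention only; the class name is hypothetical and the scale() logic is an assumption matching the comment (alpha=None behaves like alpha=rank, i.e. the LoRA delta is applied at scale 1.0), not InvokeAI's actual LoRALayerBase implementation.

from typing import Optional


class LoRALayerSketch:
    """Hypothetical stand-in used only to illustrate the alpha=None convention."""

    def __init__(self, rank: int, alpha: Optional[float] = None):
        self.rank = rank
        self.alpha = alpha  # None when the file ships no alpha metadata (typical for PEFT/diffusers checkpoints)

    def scale(self) -> float:
        # A missing alpha is treated as alpha == rank, which cancels to a scale of 1.0.
        if self.alpha is None:
            return 1.0
        return self.alpha / self.rank


assert LoRALayerSketch(rank=16).scale() == 1.0        # alpha=None acts like alpha=rank
assert LoRALayerSketch(rank=16, alpha=8.0).scale() == 0.5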

@@ -15,4 +15,6 @@ def convert_from_omi(weights_sd: StateDict, base: BaseModelType):
         BaseModelType.StableDiffusion1: convert_sd_lora_key_sets(),
         BaseModelType.StableDiffusion3: convert_sd3_lora_key_sets(),
     }[base]
-    return lora_util.__convert(weights_sd, keyset, "omi", "diffusers")
+    target = "diffusers"  # alternatively, "legacy_diffusers"
+    return lora_util.__convert(weights_sd, keyset, "omi", target)  # type: ignore