Install subdirectories with folders correctly, ensure consistent dtype of tensors in flux pipeline and vae

This commit is contained in:
Brandon Rising
2024-08-19 13:59:44 -04:00
committed by Brandon
parent 81f0886d6f
commit 4bd7fda694
5 changed files with 7 additions and 9 deletions

View File

@ -64,8 +64,7 @@ class FluxVAELoader(GenericDiffusersLoader):
params = AutoEncoderParams(**filtered_data)
with SilenceWarnings():
model = load_class(params).to(self._torch_dtype)
# load_sft doesn't support torch.device
model = load_class(params)
sd = load_file(model_path)
model.load_state_dict(sd, strict=False, assign=True)
@ -203,8 +202,6 @@ class FluxBnbQuantizednf4bCheckpointModel(GenericDiffusersLoader):
with accelerate.init_empty_weights():
model = load_class(params)
model = quantize_model_nf4(model, modules_to_not_convert=set(), compute_dtype=torch.bfloat16)
# TODO(ryand): Right now, some of the weights are loaded in bfloat16. Think about how best to handle
# this on GPUs without bfloat16 support.
sd = load_file(model_path)
model.load_state_dict(sd, strict=False, assign=True)
return model

View File

@ -69,7 +69,7 @@ def filter_files(
# limit search to subfolder if requested
if subfolder:
subfolder = root / subfolder
paths = [x for x in paths if x.parent == Path(subfolder)]
paths = [x for x in paths if Path(subfolder) in x.parents]
# _filter_by_variant uniquifies the paths and returns a set
return sorted(_filter_by_variant(paths, variant))

View File

@ -116,7 +116,7 @@ def _convert_linear_layers_to_nf4(
child.in_features,
child.out_features,
bias=has_bias,
compute_dtype=torch.float16,
compute_dtype=compute_dtype,
compress_statistics=compress_statistics,
)
if has_bias: