Install subdirectories with folders correctly, ensure consistent dtype of tensors in flux pipeline and VAE

Authored by Brandon Rising on 2024-08-19 13:59:44 -04:00; committed by Brandon
parent 81f0886d6f
commit 4bd7fda694
5 changed files with 7 additions and 9 deletions


@@ -72,7 +72,7 @@ class FluxTextToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
         t5_embeddings: torch.Tensor,
     ):
         transformer_info = context.models.load(self.transformer.transformer)
-        inference_dtype = TorchDevice.choose_torch_dtype()
+        inference_dtype = torch.bfloat16

         # Prepare input noise.
         # TODO(ryand): Does the seed behave the same on different devices? Should we re-implement this to always use a
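
The change above pins bfloat16 instead of a per-device heuristic. A minimal sketch of the intent (shapes and tensor names are illustrative, not the pipeline's actual code): every tensor that enters the FLUX transformer is created or cast with one pinned inference dtype, so the noise and text embeddings cannot end up mismatched on devices where the heuristic would have chosen float16.

```python
import torch

# Illustrative only: pin one inference dtype and make every input agree with it.
inference_dtype = torch.bfloat16

noise = torch.randn(1, 16, 128, 128, dtype=inference_dtype)        # latent noise
t5_embeddings = torch.zeros(1, 512, 4096).to(dtype=inference_dtype)
clip_embeddings = torch.zeros(1, 768).to(dtype=inference_dtype)

assert noise.dtype == t5_embeddings.dtype == clip_embeddings.dtype == inference_dtype
```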


@@ -783,8 +783,9 @@ class ModelInstallService(ModelInstallServiceBase):
             # So what we do is to synthesize a folder named "sdxl-turbo_vae" here.
             if subfolder:
                 top = Path(remote_files[0].path.parts[0])  # e.g. "sdxl-turbo/"
-                path_to_remove = top / subfolder.parts[-1]  # sdxl-turbo/vae/
-                path_to_add = Path(f"{top}_{subfolder}")
+                path_to_remove = top / subfolder  # sdxl-turbo/vae/
+                subfolder_rename = subfolder.name.replace('/', '_').replace('\\', '_')
+                path_to_add = Path(f"{top}_{subfolder_rename}")
             else:
                 path_to_remove = Path(".")
                 path_to_add = Path(".")
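
A small sketch of the folder synthesis described in the hunk's comment. The example repo id, file path, and the final `relative_to` rewrite are assumptions about how `path_to_remove`/`path_to_add` get applied, not the service's exact code:

```python
from pathlib import Path

top = Path("sdxl-turbo")          # top-level repo folder, e.g. "sdxl-turbo/"
subfolder = Path("vae")           # requested subfolder

path_to_remove = top / subfolder  # sdxl-turbo/vae
subfolder_rename = subfolder.name.replace("/", "_").replace("\\", "_")
path_to_add = Path(f"{top}_{subfolder_rename}")  # synthesized "sdxl-turbo_vae"

remote_path = Path("sdxl-turbo/vae/diffusion_pytorch_model.safetensors")
local_path = path_to_add / remote_path.relative_to(path_to_remove)
print(local_path)  # sdxl-turbo_vae/diffusion_pytorch_model.safetensors
```

Stripping the full `subfolder` (rather than only its last part) is what keeps files under nested subfolders mapping to the right destination.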


@@ -64,8 +64,7 @@ class FluxVAELoader(GenericDiffusersLoader):
             params = AutoEncoderParams(**filtered_data)

             with SilenceWarnings():
-                model = load_class(params).to(self._torch_dtype)
-                # load_sft doesn't support torch.device
+                model = load_class(params)
                 sd = load_file(model_path)
                 model.load_state_dict(sd, strict=False, assign=True)
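
The hunk above drops the eager `.to(self._torch_dtype)` cast. A runnable illustration of why that cast is redundant with assign-loading (a plain `nn.Linear` and a temporary demo checkpoint stand in for the FLUX VAE): with `assign=True`, `load_state_dict` adopts the checkpoint tensors directly, so the model ends up in the dtype stored on disk regardless of any earlier cast.

```python
import torch
from safetensors.torch import load_file, save_file

# Demo checkpoint saved in bfloat16, standing in for the VAE weights on disk.
save_file(
    {"weight": torch.randn(4, 4, dtype=torch.bfloat16), "bias": torch.zeros(4, dtype=torch.bfloat16)},
    "vae_demo.safetensors",
)

model = torch.nn.Linear(4, 4)    # constructed in float32
model = model.to(torch.float16)  # an eager cast like the removed .to(self._torch_dtype)

sd = load_file("vae_demo.safetensors")
model.load_state_dict(sd, strict=False, assign=True)

print(model.weight.dtype)  # torch.bfloat16 — the checkpoint's dtype wins
```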
@@ -203,8 +202,6 @@ class FluxBnbQuantizednf4bCheckpointModel(GenericDiffusersLoader):
             with accelerate.init_empty_weights():
                 model = load_class(params)
                 model = quantize_model_nf4(model, modules_to_not_convert=set(), compute_dtype=torch.bfloat16)
-            # TODO(ryand): Right now, some of the weights are loaded in bfloat16. Think about how best to handle
-            # this on GPUs without bfloat16 support.
             sd = load_file(model_path)
             model.load_state_dict(sd, strict=False, assign=True)
             return model
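
For context on the loader pattern around the removed TODO, a minimal sketch of meta-device construction followed by assign-loading. A tiny stand-in module and a temporary checkpoint replace the real transformer, and the `quantize_model_nf4` step is omitted:

```python
import accelerate
import torch
from safetensors.torch import load_file, save_file

class TinyModel(torch.nn.Module):
    # Stand-in for the real model class built by the loader.
    def __init__(self) -> None:
        super().__init__()
        self.proj = torch.nn.Linear(8, 8)

# Parameters are created on the meta device, so nothing is allocated up front.
with accelerate.init_empty_weights():
    model = TinyModel()
    # (The NF4 quantization step from the hunk would run here.)

# Write a demo checkpoint so the example is self-contained.
save_file({"proj.weight": torch.randn(8, 8), "proj.bias": torch.zeros(8)}, "demo.safetensors")

# assign=True swaps the meta parameters for the checkpoint tensors, materializing the model.
sd = load_file("demo.safetensors")
model.load_state_dict(sd, strict=False, assign=True)
print(model.proj.weight.device, model.proj.weight.dtype)  # cpu torch.float32
```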


@@ -69,7 +69,7 @@ def filter_files(
     # limit search to subfolder if requested
     if subfolder:
         subfolder = root / subfolder
-        paths = [x for x in paths if x.parent == Path(subfolder)]
+        paths = [x for x in paths if Path(subfolder) in x.parents]

     # _filter_by_variant uniquifies the paths and returns a set
     return sorted(_filter_by_variant(paths, variant))
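
The filter change above switches from matching a file's immediate parent to matching any ancestor. A small, runnable illustration of the difference (the paths are made up): with the old predicate a file nested one level below the requested subfolder is dropped, with the new one it is kept, which is what lets subdirectory installs pick up their nested contents.

```python
from pathlib import Path

subfolder = Path("root/transformer")
paths = [
    Path("root/transformer/config.json"),
    Path("root/transformer/sub1/model.safetensors"),  # nested one level deeper
]

direct_children = [x for x in paths if x.parent == subfolder]  # old predicate
any_depth = [x for x in paths if subfolder in x.parents]       # new predicate

print(len(direct_children))  # 1 — the nested file is filtered out
print(len(any_depth))        # 2 — both files survive
```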


@@ -116,7 +116,7 @@ def _convert_linear_layers_to_nf4(
                 child.in_features,
                 child.out_features,
                 bias=has_bias,
-                compute_dtype=torch.float16,
+                compute_dtype=compute_dtype,
                 compress_statistics=compress_statistics,
             )
             if has_bias:
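
The last hunk threads the caller's `compute_dtype` into the NF4 linear layers instead of hardcoding float16, so the `compute_dtype=torch.bfloat16` passed by `quantize_model_nf4` actually takes effect. A simplified sketch of that pattern, with bitsandbytes' stock `LinearNF4` standing in for the layer class constructed in the repository and the surrounding recursion stripped down:

```python
import bitsandbytes as bnb
import torch

def convert_linear_layers_to_nf4(
    module: torch.nn.Module,
    compute_dtype: torch.dtype,
    compress_statistics: bool = False,
) -> None:
    # Recursively swap nn.Linear children for NF4 layers, forwarding the caller's
    # compute_dtype instead of a hardcoded torch.float16.
    for name, child in module.named_children():
        if isinstance(child, torch.nn.Linear):
            has_bias = child.bias is not None
            replacement = bnb.nn.LinearNF4(
                child.in_features,
                child.out_features,
                bias=has_bias,
                compute_dtype=compute_dtype,
                compress_statistics=compress_statistics,
            )
            setattr(module, name, replacement)
        else:
            convert_linear_layers_to_nf4(child, compute_dtype, compress_statistics)
```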