Merge branch 'main' into nodes-stuff

blessedcoolant committed 2023-07-19 02:37:50 +12:00
6 changed files with 47 additions and 16 deletions


@@ -306,7 +306,7 @@ class SDXLTextToLatentsInvocation(BaseInvocation):
 add_time_ids = add_time_ids.to(device=unet.device, dtype=unet.dtype)
 latents = latents.to(device=unet.device, dtype=unet.dtype)
-with tqdm(total=self.steps) as progress_bar:
+with tqdm(total=num_inference_steps) as progress_bar:
     for i, t in enumerate(timesteps):
         # expand the latents if we are doing classifier free guidance
         latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
@@ -352,7 +352,7 @@ class SDXLTextToLatentsInvocation(BaseInvocation):
 add_time_ids = add_time_ids.to(device=unet.device, dtype=unet.dtype)
 latents = latents.to(device=unet.device, dtype=unet.dtype)
-with tqdm(total=self.steps) as progress_bar:
+with tqdm(total=num_inference_steps) as progress_bar:
     for i, t in enumerate(timesteps):
         # expand the latents if we are doing classifier free guidance
         #latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
@@ -416,6 +416,7 @@ class SDXLTextToLatentsInvocation(BaseInvocation):
 #################
 latents = latents.to("cpu")
+torch.cuda.empty_cache()
 name = f'{context.graph_execution_state_id}__{self.id}'
@@ -653,6 +654,7 @@ class SDXLLatentsToLatentsInvocation(BaseInvocation):
 #################
 latents = latents.to("cpu")
+torch.cuda.empty_cache()
 name = f'{context.graph_execution_state_id}__{self.id}'